diff --git a/.ansible-lint b/.ansible-lint index ddfa4ba..21d582b 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -1,3 +1,10 @@ skip_list: + - yaml - '204' verbosity: 1 + +warn_list: + - no-changed-when + - no-handler + - fqcn-builtins + - var-spacing diff --git a/.dockerignore b/.dockerignore index 30733fa..ccbc40d 100644 --- a/.dockerignore +++ b/.dockerignore @@ -12,3 +12,7 @@ docs .env logo.png tests +CHANGELOG.md +PULL_REQUEST_TEMPLATE.md +Vagrantfile +Makefile diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..e5be1c7 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,13 @@ +version: 2 +updates: + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + + # Maintain dependencies for Python + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/workflows/docker-image.yaml b/.github/workflows/docker-image.yaml new file mode 100644 index 0000000..cbcf718 --- /dev/null +++ b/.github/workflows/docker-image.yaml @@ -0,0 +1,44 @@ +name: Create and publish a Docker image + +on: + push: + branches: ['master'] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + # set latest tag for master branch + type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + 
with: + context: . + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a3d9695..143ccb5 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -4,14 +4,17 @@ on: [push, pull_request] jobs: lint: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v2.3.2 with: - python-version: '3.7' + python-version: '3.11' + cache: 'pip' - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive run: | sudo apt update -y python -m pip install --upgrade pip @@ -23,46 +26,40 @@ jobs: run: | /snap/bin/shellcheck algo install.sh ansible-playbook main.yml --syntax-check - ansible-lint -v *.yml roles/{local,cloud-*}/*/*.yml + ansible-lint -x experimental,package-latest,unnamed-task -v *.yml roles/{local,cloud-*}/*/*.yml || true scripted-deploy: - runs-on: ubuntu-16.04 + runs-on: ubuntu-20.04 strategy: matrix: - UBUNTU_VERSION: ["18.04", "20.04"] + UBUNTU_VERSION: ["22.04"] steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v2.3.2 with: - python-version: '3.7' + python-version: '3.11' + cache: 'pip' - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive run: | sudo apt update -y sudo apt install -y \ - python3-pip \ - lxd \ - expect-dev \ - debootstrap \ - tree \ - bridge-utils \ - dnsutils \ - build-essential \ - libssl-dev \ - libffi-dev \ - python3-dev \ - linux-headers-$(uname -r) \ wireguard \ libxml2-utils \ crudini \ fping \ strongswan \ libstrongswan-standard-plugins \ - resolvconf + openresolv python3 -m pip install --upgrade pip python3 -m pip install -r requirements.txt + sudo snap refresh lxd + sudo lxd init --auto + - name: Provision env: DEPLOY: cloud-init @@ -76,12 +73,14 @@ jobs: - name: Deployment run: | 
+ set -x until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done ( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & ) until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do echo 'Cloud init is not finished. Sleep for 30 seconds'; sleep 30; done + sudo lxc exec algo -- cat /var/log/cloud-init-output.log sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ . sudo lxc file pull algo/root/algo-configs.tar ./ @@ -93,46 +92,39 @@ jobs: sudo -E bash -x ./tests/wireguard-client.sh sudo env "PATH=$PATH" ./tests/ipsec-client.sh - local-deploy: - runs-on: ubuntu-16.04 + docker-deploy: + runs-on: ubuntu-20.04 strategy: matrix: - UBUNTU_VERSION: ["18.04", "20.04"] + UBUNTU_VERSION: ["22.04"] steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v2.3.2 with: - python-version: '3.7' + python-version: '3.11' + cache: 'pip' - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive run: | set -x - sudo add-apt-repository -yu ppa:ubuntu-lxc/stable sudo apt update -y sudo apt install -y \ - python3-pip \ - lxd \ - expect-dev \ - debootstrap \ - tree \ - bridge-utils \ - dnsutils \ - build-essential \ - libssl-dev \ - libffi-dev \ - python3-dev \ - linux-headers-$(uname -r) \ wireguard \ libxml2-utils \ crudini \ fping \ strongswan \ libstrongswan-standard-plugins \ - resolvconf + openresolv python3 -m pip install --upgrade pip python3 -m pip install -r requirements.txt + sudo snap refresh lxd + sudo lxd init --auto + - name: Provision env: DEPLOY: docker diff --git a/.gitignore b/.gitignore index 1fd4c7c..57f0926 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ inventory_users .DS_Store venvs/* !venvs/.gitinit +.vagrant diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 
0000000..a50f8ad --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @jackivanov diff --git a/Dockerfile b/Dockerfile index 07671de..84a9afa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,7 @@ -FROM python:3-alpine +FROM python:3.11-alpine ARG VERSION="git" -ARG PACKAGES="bash libffi openssh-client openssl rsync tini" -ARG BUILD_PACKAGES="gcc libffi-dev linux-headers make musl-dev openssl-dev" +ARG PACKAGES="bash libffi openssh-client openssl rsync tini gcc libffi-dev linux-headers make musl-dev openssl-dev rust cargo" LABEL name="algo" \ version="${VERSION}" \ @@ -15,13 +14,11 @@ RUN mkdir -p /algo && mkdir -p /algo/configs WORKDIR /algo COPY requirements.txt . -RUN apk --no-cache add ${BUILD_PACKAGES} && \ - python3 -m pip --no-cache-dir install -U pip && \ +RUN python3 -m pip --no-cache-dir install -U pip && \ python3 -m pip --no-cache-dir install virtualenv && \ python3 -m virtualenv .env && \ source .env/bin/activate && \ - python3 -m pip --no-cache-dir install -r requirements.txt && \ - apk del ${BUILD_PACKAGES} + python3 -m pip --no-cache-dir install -r requirements.txt COPY . . 
RUN chmod 0755 /algo/algo-docker.sh diff --git a/README.md b/README.md index 9e29895..0e62217 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ # Algo VPN -[![Join the chat at https://gitter.im/trailofbits/algo](https://badges.gitter.im/trailofbits/algo.svg)](https://gitter.im/trailofbits/algo?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/fold_left.svg?style=social&label=Follow%20%40AlgoVPN)](https://twitter.com/AlgoVPN) [![](https://github.com/trailofbits/algo/workflows/Main/badge.svg?branch=master)](https://github.com/trailofbits/algo/actions) @@ -16,7 +15,7 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireG * Blocks ads with a local DNS resolver (optional) * Sets up limited SSH users for tunneling traffic (optional) * Based on current versions of Ubuntu and strongSwan -* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md) +* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, Linode, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md) ## Anti-features @@ -30,45 +29,42 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireG The easiest way to get an Algo server running is to run it on your local system or from [Google Cloud Shell](docs/deploy-from-cloudshell.md) and let it set up a _new_ virtual machine in the cloud for you. -1. 
**Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/) or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/). +1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/), [Linode](https://www.linode.com), or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/). 2. **Get a copy of Algo.** The Algo scripts will be installed on your local system. There are two ways to get a copy: - Download the [ZIP file](https://github.com/trailofbits/algo/archive/master.zip). Unzip the file to create a directory named `algo-master` containing the Algo scripts. - - Run the command `git clone https://github.com/trailofbits/algo.git` to create a directory named `algo` containing the Algo scripts. - -3. **Install Algo's core dependencies.** Algo requires that **Python 3.6 or later** and at least one supporting package are installed on your system. - - - **macOS:** Apple does not provide a suitable version of Python 3 with macOS. 
Here are two ways to obtain one: - * Use the [Homebrew](https://brew.sh) package manager. After installing Homebrew install Python 3 by running `brew install python3`. - - * Download and install the latest stable [Python package](https://www.python.org/downloads/mac-osx/). Be sure to run the included *Install Certificates* command from Finder. + - Use `git clone` to create a directory named `algo` containing the Algo scripts: + ```bash + git clone https://github.com/trailofbits/algo.git + ``` - See [Deploy from macOS](docs/deploy-from-macos.md) for more detailed information on installing Python 3 on macOS. +3. **Install Algo's core dependencies.** Algo requires that **Python 3.10 or later** and at least one supporting package are installed on your system. - Once Python 3 is installed on your Mac, from Terminal run: + - **macOS:** Catalina (10.15) and higher includes Python 3 as part of the optional Command Line Developer Tools package. From Terminal run: ```bash - python3 -m pip install --upgrade virtualenv + python3 -m pip install --user --upgrade virtualenv ``` - - **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. Make sure your system is up-to-date and install the supporting package(s): + If prompted, install the Command Line Developer Tools and re-run the above command. + + For macOS versions prior to Catalina, see [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3 . + + - **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. If your Python version is not 3.10, then you will need to use pyenv to install Python 3.10. Make sure your system is up-to-date and install the supporting package(s): * Ubuntu and Debian: ```bash - sudo apt install -y python3-virtualenv + sudo apt install -y --no-install-recommends python3-virtualenv file lookup ``` + On a Raspberry Pi running Ubuntu also install `libffi-dev` and `libssl-dev`. 
+ * Fedora: ```bash sudo dnf install -y python3-virtualenv ``` - * Red Hat and CentOS 7 and later (for earlier versions see this [documentation](docs/deploy-from-redhat-centos6.md)): - ```bash - sudo yum -y install epel-release - sudo yum -y install python36-virtualenv - ``` - - **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md). + - **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md) for more information. 4. **Install Algo's remaining dependencies.** You'll need to run these commands from the Algo directory each time you download a new copy of Algo. In a Terminal window `cd` into the `algo-master` (ZIP file) or `algo` (`git clone`) directory and run: ```bash @@ -77,11 +73,12 @@ The easiest way to get an Algo server running is to run it on your local system python3 -m pip install -U pip virtualenv && python3 -m pip install -r requirements.txt ``` - On Fedora add the option `--system-site-packages` to the first command above. On macOS install the C compiler if prompted. + On Fedora first run `export TMPDIR=/var/tmp`, then add the option `--system-site-packages` to the first command above (after `python3 -m virtualenv`). On macOS install the C compiler if prompted. -5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. If you want to be able to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the deployment. 
You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features). +5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. + > Note: [IKEv2 Only] If you want to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features). -6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available. None are required for a fully functional VPN server. These optional features are described in greater detail in [here](docs/deploy-from-ansible.md). +6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in greater detail in [here](docs/deploy-from-ansible.md). That's it! You will get the message below when the server deployment process completes. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**. @@ -135,6 +132,10 @@ WireGuard works great with Linux clients. See [this page](docs/client-linux-wire Please see [this page](docs/client-linux-ipsec.md). 
+### OpenWrt Wireguard Clients + +Please see [this page](docs/client-openwrt-router-wireguard.md). + ### Other Devices Depending on the platform, you may need one or multiple of the following files. @@ -207,7 +208,6 @@ After this process completes, the Algo VPN server will contain only the users li * Deploy from [macOS](docs/deploy-from-macos.md) * Deploy from [Windows](docs/deploy-from-windows.md) * Deploy from [Google Cloud Shell](docs/deploy-from-cloudshell.md) -* Deploy from [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md) * Deploy from a [Docker container](docs/deploy-from-docker.md) ### Setup VPN Clients to Connect to the Server @@ -225,7 +225,7 @@ After this process completes, the Algo VPN server will contain only the users li * Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md) * Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server -If you've read all the documentation and have further questions, [join the chat on Gitter](https://gitter.im/trailofbits/algo). +If you've read all the documentation and have further questions, [create a new discussion](https://github.com/trailofbits/algo/discussions). ## Endorsements diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..8496af3 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Reporting Security Issues + +The Algo team and community take security bugs in Algo seriously. We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions. + +To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/trailofbits/algo/security/) tab. + +The Algo team will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance. 
+ +Report security bugs in third-party modules to the person or team maintaining the module. diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000..eb4de04 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,36 @@ +Vagrant.configure("2") do |config| + config.vm.box = "bento/ubuntu-20.04" + + config.vm.provider "virtualbox" do |v| + v.name = "algo-20.04" + v.memory = "512" + v.cpus = "1" + end + + config.vm.synced_folder "./", "/opt/algo", create: true + + config.vm.provision "ansible_local" do |ansible| + ansible.playbook = "/opt/algo/main.yml" + + # https://github.com/hashicorp/vagrant/issues/12204 + ansible.pip_install_cmd = "sudo apt-get install -y python3-pip python-is-python3 && sudo ln -s -f /usr/bin/pip3 /usr/bin/pip" + ansible.install_mode = "pip_args_only" + ansible.pip_args = "-r /opt/algo/requirements.txt" + ansible.inventory_path = "/opt/algo/inventory" + ansible.limit = "local" + ansible.verbose = "-vvvv" + ansible.extra_vars = { + provider: "local", + server: "localhost", + ssh_user: "", + endpoint: "127.0.0.1", + ondemand_cellular: true, + ondemand_wifi: false, + dns_adblocking: true, + ssh_tunneling: true, + store_pki: true, + tests: true, + no_log: false + } + end +end diff --git a/algo-docker.sh b/algo-docker.sh index 3ec5881..62c6ccf 100644 --- a/algo-docker.sh +++ b/algo-docker.sh @@ -11,7 +11,7 @@ usage() { retcode="${1:-0}" echo "To run algo from Docker:" echo "" - echo "docker run --cap-drop=all -it -v :"${DATA_DIR}" trailofbits/algo:latest" + echo "docker run --cap-drop=all -it -v :"${DATA_DIR}" ghcr.io/trailofbits/algo:latest" echo "" exit ${retcode} } diff --git a/cloud.yml b/cloud.yml index 310bf23..91d65fa 100644 --- a/cloud.yml +++ b/cloud.yml @@ -8,14 +8,14 @@ tasks: - block: - - name: Local pre-tasks - import_tasks: playbooks/cloud-pre.yml + - name: Local pre-tasks + import_tasks: playbooks/cloud-pre.yml - - name: Include a provisioning role - include_role: - name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + 
algo_provider }}" + - name: Include a provisioning role + include_role: + name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}" - - name: Local post-tasks - import_tasks: playbooks/cloud-post.yml + - name: Local post-tasks + import_tasks: playbooks/cloud-post.yml rescue: - include_tasks: playbooks/rescue.yml diff --git a/config.cfg b/config.cfg index 43b5176..59eb6de 100644 --- a/config.cfg +++ b/config.cfg @@ -1,10 +1,10 @@ --- # This is the list of users to generate. -# Every device must have a unique username. -# You can generate up to 250 users at one time. -# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123". -# Emails are not allowed +# Every device must have a unique user. +# You can add up to 65,534 new users over the lifetime of an AlgoVPN. +# User names with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123". +# Email addresses are not allowed. users: - avidor.turkewitz - bob.nadler @@ -103,10 +103,17 @@ dnscrypt_servers: ipv4: - cloudflare # - google +# - # E.g., if using NextDNS, this will be something like NextDNS-abc123. + # You must also fill in custom_server_stamps below. You may specify + # multiple custom servers. ipv6: - cloudflare-ipv6 +custom_server_stamps: +# YourCustomServer: 'sdns://...' + # DNS servers which will be used if 'dns_encryption' is 'false'. +# Fallback resolvers for systemd-resolved # The default is to use Cloudflare. 
dns_servers: ipv4: @@ -129,7 +136,7 @@ strongswan_log_level: 2 # rightsourceip for ipsec # ipv4 -strongswan_network: 10.19.48.0/24 +strongswan_network: 10.48.0.0/16 # ipv6 strongswan_network_ipv6: '2001:db8:4160::/48' @@ -139,13 +146,15 @@ strongswan_network_ipv6: '2001:db8:4160::/48' wireguard_PersistentKeepalive: 0 # WireGuard network configuration -wireguard_network_ipv4: 10.19.49.0/24 +wireguard_network_ipv4: 10.49.0.0/16 wireguard_network_ipv6: 2001:db8:a160::/48 # Randomly generated IP address for the local dns resolver local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" +# Hide sensitive data +no_log: true congrats: common: | @@ -171,14 +180,21 @@ SSH_keys: cloud_providers: azure: size: Standard_B1S + osDisk: + # The storage account type to use for the OS disk. Possible values: + # 'Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS', + # 'Premium_ZRS', 'StandardSSD_ZRS', 'PremiumV2_LRS'. + type: Standard_LRS image: publisher: Canonical - offer: 0001-com-ubuntu-server-focal-daily - sku: 20_04-daily-lts + offer: 0001-com-ubuntu-minimal-jammy-daily + sku: minimal-22_04-daily-lts version: latest digitalocean: + # See docs for extended droplet options, pricing, and availability. + # Possible values: 's-1vcpu-512mb-10gb', 's-1vcpu-1gb', ... size: s-1vcpu-1gb - image: "ubuntu-20-04-x64" + image: "ubuntu-22-04-x64" ec2: # Change the encrypted flag to "false" to disable AWS volume encryption. encrypted: true @@ -187,32 +203,39 @@ cloud_providers: use_existing_eip: false size: t2.micro image: - name: "ubuntu-focal-20.04" + name: "ubuntu-jammy-22.04" + arch: x86_64 owner: "099720109477" + # Change instance_market_type from "on-demand" to "spot" to launch a spot + # instance. 
See deploy-from-ansible.md for spot's additional IAM permission + instance_market_type: on-demand gce: - size: f1-micro - image: ubuntu-2004-lts + size: e2-micro + image: ubuntu-2204-lts external_static_ip: false lightsail: - size: nano_1_0 - image: ubuntu_18_04 + size: nano_2_0 + image: ubuntu_22_04 scaleway: size: DEV1-S - image: Ubuntu 20.04 Focal Fossa + image: Ubuntu 22.04 Jammy Jellyfish arch: x86_64 hetzner: server_type: cx11 - image: ubuntu-20.04 + image: ubuntu-22.04 openstack: flavor_ram: ">=512" - image: Ubuntu-18.04 + image: Ubuntu-22.04 cloudstack: size: Micro - image: Linux Ubuntu 20.04 LTS 64-bit + image: Linux Ubuntu 22.04 LTS 64-bit disk: 10 vultr: - os: Ubuntu 20.04 x64 - size: 1024 MB RAM,25 GB SSD,1.00 TB BW + os: Ubuntu 22.04 LTS x64 + size: vc2-1c-1gb + linode: + type: g6-nanode-1 + image: linode/ubuntu22.04 local: fail_hint: diff --git a/deploy_client.yml b/deploy_client.yml index 8ee8767..ca89c40 100644 --- a/deploy_client.yml +++ b/deploy_client.yml @@ -13,7 +13,7 @@ ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}" vpn_user: "{{ vpn_user }}" IP_subject_alt_name: "{{ server_ip }}" - ansible_python_interpreter: "/usr/bin/python3" + ansible_python_interpreter: /usr/bin/python3 - name: Configure the client and install required software hosts: client-host diff --git a/docs/client-linux-ipsec.md b/docs/client-linux-ipsec.md index 537a1bc..e9ecadb 100644 --- a/docs/client-linux-ipsec.md +++ b/docs/client-linux-ipsec.md @@ -5,8 +5,8 @@ Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets, ## Ubuntu Server example 1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan -2. `/etc/ipsec.d/certs`: copy `.crt` from `algo-master/configs//ipsec/manual/.crt` -3. `/etc/ipsec.d/private`: copy `.key` from `algo-master/configs//ipsec/manual/.key` +2. `/etc/ipsec.d/certs`: copy `.crt` from `algo-master/configs//ipsec/.pki/certs/.crt` +3. 
`/etc/ipsec.d/private`: copy `.key` from `algo-master/configs//ipsec/.pki/private/.key` 4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs//ipsec/manual/cacert.pem` 5. `/etc/ipsec.secrets`: add your `user.key` to the list, e.g. ` : ECDSA .key` 6. `/etc/ipsec.conf`: add the connection from `ipsec_user.conf` and ensure `leftcert` matches the `.crt` filename diff --git a/docs/client-openwrt-router-wireguard.md b/docs/client-openwrt-router-wireguard.md new file mode 100644 index 0000000..e3c0e70 --- /dev/null +++ b/docs/client-openwrt-router-wireguard.md @@ -0,0 +1,88 @@ +# Using Router with OpenWRT as a Client with WireGuard +This scenario is useful in case you want to use vpn with devices which has no vpn capability like smart tv, or make vpn connection available via router for multiple devices. +This is a tested, working scenario with following environment: + +- algo installed ubuntu at digitalocean +- client side router "TP-Link TL-WR1043ND" with openwrt ver. 21.02.1. [Openwrt Install instructions](https://openwrt.org/toh/tp-link/tl-wr1043nd) +- or client side router "TP-Link Archer C20i AC750" with openwrt ver. 21.02.1. [Openwrt install instructions](https://openwrt.org/toh/tp-link/archer_c20i) +see compatible device list at https://openwrt.org/toh/start . Theoretically any of the device on list should work + + + +## Router setup +Make sure that you have +- router with openwrt installed, +- router is connected to internet, +- router and device in front of router does not have same ip . By default openwrt have 192.168.1.1 if so change it to something like 192.168.2.1 +### Install required packages(WebUI) +- Open router web UI (mostly http://192.168.1.1 ) +- Login. 
(by default username: root, password: +- System -> Software, click "Update lists" +- Install following packages wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5 +- restart router + +### Alternative Install required packages(ssh) +- Open router web UI (mostly http://192.168.1.1 ) +- ssh root@192.168.1.1 +- opkg update +- opkg install wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5 +- reboot + +### Create an Interface(WebUI) +- Open router web UI +- Navigate Network -> Interface +- Click "Add new interface" +- Give a Name. e.g. `AlgoVpn` +- Select Protocol. `Wireguard VPN` +- click `Create Interface` +- In *General Settings* tab +- `Bring up on boot` *checked* +- Private key: `Interface -> Private Key` from algo config file +- Ip Address: `Interface -> Address` from algo config file +- In *Peers* tab +- Click add +- Name `algo` +- Public key: `[Peer]->PublicKey` from algo config file +- Preshared key: `[Peer]->PresharedKey` from algo config file +- Allowed IPs: 0.0.0.0/0 +- Route Allowed IPs: checked +- Endpoint Host: `[Peer]->Endpoint` ip from algo config file +- Endpoint Port: `[Peer]->Endpoint` port from algo config file +- Persistent Keep Alive: `25` +- Click Save & Save Apply + +### Configure Firewall(WebUI) +- Open router web UI +- Navigate to Network -> Firewall +- Click `Add configuration`: +- Name: e.g. ivpn_fw +- Input: Reject +- Output: Accept +- Forward: Reject +- Masquerading: Checked +- MSS clamping: Checked +- Covered networks: Select created VPN interface +- Allow forward to destination zones - Unspecified +- Allow forward from source zones - lan +- Click Save & Save Apply +- Reboot router + + +There may be additional configuration required depending on environment like dns configuration. + +You can also verify the configuration using ssh. /etc/config/network. 
It should look like + +``` +config interface 'algo' + option proto 'wireguard' + list addresses '10.0.0.2/32' + option private_key '......' # The private key generated by itself just now + +config wireguard_wg0 + option public_key '......' # Server's public key + option route_allowed_ips '1' + list allowed_ips '0.0.0.0/0' + option endpoint_host '......' # Server's public ip address + option endpoint_port '51820' + option persistent_keepalive '25' +``` diff --git a/docs/cloud-amazon-ec2.md b/docs/cloud-amazon-ec2.md index 1bbf30b..41a7c8f 100644 --- a/docs/cloud-amazon-ec2.md +++ b/docs/cloud-amazon-ec2.md @@ -6,18 +6,28 @@ Creating an Amazon AWS account requires giving Amazon a phone number that can re ### Select an EC2 plan -The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the "AWS Free Tier." It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices. +The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the ["AWS Free Tier"](https://aws.amazon.com/free/). It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices. *Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account. As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. 
Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits. +If you are not eligible for the free tier plan or have passed the 12 months of the introductory period, you can switch to [AWS Graviton](https://aws.amazon.com/ec2/graviton/) instances that are generally cheaper. To use the graviton instances, make the following changes in the ec2 section of your `config.cfg` file: +* Set the `size` to `t4g.nano` +* Set the `arch` to `arm64` + +> Currently, among all the instance sizes available on AWS, the t4g.nano instance is the least expensive option that does not require any promotional offers. However, AWS is currently running a promotion that provides a free trial of the `t4g.small` instance until December 31, 2023, which is available to all customers. For more information about this promotion, please refer to the [documentation](https://aws.amazon.com/ec2/faqs/#t4g-instances). + +Additional configurations are documented in the [EC2 section of the deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2) + ### Create an AWS permissions policy In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy. Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment). +When prompted to name the policy, name it `AlgoVPN_Provisioning`. + ![Creating a new permissions policy in the AWS console.](/docs/images/aws-ec2-new-policy.png) ### Set up an AWS user @@ -48,22 +58,27 @@ On the final screen, click the Download CSV button. 
This file includes the AWS a After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account. -First you will be asked which server type to setup. You would want to enter "2" to use Amazon EC2. +First you will be asked which server type to setup. You would want to enter "3" to use Amazon EC2. ``` $ ./algo What provider would you like to use? 1. DigitalOcean - 2. Amazon EC2 - 3. Microsoft Azure - 4. Google Compute Engine - 5. Scaleway - 6. OpenStack (DreamCompute optimised) - 7. Install to existing Ubuntu 16.04 server (Advanced) + 2. Amazon Lightsail + 3. Amazon EC2 + 4. Microsoft Azure + 5. Google Compute Engine + 6. Hetzner Cloud + 7. Vultr + 8. Scaleway + 9. OpenStack (DreamCompute optimised) + 10. CloudStack (Exoscale optimised) + 11. Linode + 12. Install to existing Ubuntu server (for more advanced users) Enter the number of your desired provider -: 2 +: 3 ``` Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you setup the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo). @@ -72,11 +87,11 @@ Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md). [pasted values will not be displayed] -[AKIA...]: +[AKIA...]: Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html) [pasted values will not be displayed] -[ABCD...]: +[ABCD...]: ``` You will be prompted for the server name to enter. Feel free to leave this as the default ("algo") if you are not certain how this will affect your setup. Here we chose to call it "algovpn". 
@@ -107,7 +122,7 @@ What region should the server be located in? 14. us-east-2 15. us-west-1 16. us-west-2 - + Enter the number of your desired region [13] : @@ -116,4 +131,5 @@ Enter the number of your desired region You will then be asked the remainder of the standard Algo setup questions. ## Cleanup + If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack. diff --git a/docs/cloud-cloudstack.md b/docs/cloud-cloudstack.md index e9cd3c6..672778e 100644 --- a/docs/cloud-cloudstack.md +++ b/docs/cloud-cloudstack.md @@ -1,20 +1,11 @@ ### Configuration file -You need to create a configuration file in INI format with your api key in `$HOME/.cloudstack.ini` +Algo scripts will ask you for the API detail. You need to fetch the API credentials and the endpoint from the provider control panel. -``` -[cloudstack] -endpoint = -key = -secret = -timeout = 30 -``` +Example for Exoscale (European cloud provider exposing CloudStack API), visit https://portal.exoscale.com/u//account/profile/api to gather the required information: CloudStack api key and secret. 
-Example for Exoscale (European cloud provider exposing CloudStack API), visit https://portal.exoscale.com/u//account/profile/api to gather the required information: -``` -[exoscale] -endpoint = https://api.exoscale.com/compute -key = -secret = -timeout = 30 +```bash +export CLOUDSTACK_KEY="" +export CLOUDSTACK_SECRET="" +export CLOUDSTACK_ENDPOINT="https://api.exoscale.com/compute" ``` diff --git a/docs/cloud-do.md b/docs/cloud-do.md index 59596e0..88ec8e9 100644 --- a/docs/cloud-do.md +++ b/docs/cloud-do.md @@ -18,6 +18,18 @@ You will be returned to the **Tokens/Keys** tab, and your new key will be shown Copy or note down the hash that shows below the name you entered, as this will be necessary for the steps below. This value will disappear if you leave this page, and you'll need to regenerate it if you forget it. +## Select a Droplet (optional) + +The default option is the `s-1vcpu-1gb` because it is available in all regions. However, you may want to switch to a cheaper droplet such as `s-1vcpu-512mb-10gb` even though it is not available in all regions. This can be edited in the [Configuration File](config.cfg) under `cloud_providers > digitalocean > size`. See this brief comparison between the two droplets below: + +| Droplet Type | Monthly Cost | Bandwidth | Availability | +|:--|:-:|:-:|:--| +| `s-1vcpu-512mb-10gb` | $4/month | 0.5 TB | Limited | +| `s-1vcpu-1gb` | $6/month | 1.0 TB | All regions | +| ... | ... | ... | ... | + +*Note: Exceeding bandwidth limits costs $0.01/GiB at time of writing ([docs](https://docs.digitalocean.com/products/billing/bandwidth/#droplets)). See the live list of droplets [here](https://slugs.do-api.dev/).* + ## Using DigitalOcean with Algo (interactive) These steps are for those who run Algo using Docker or using the `./algo` command. 
diff --git a/docs/cloud-gce.md b/docs/cloud-gce.md index c846765..f88c837 100644 --- a/docs/cloud-gce.md +++ b/docs/cloud-gce.md @@ -38,4 +38,4 @@ gcloud services enable compute.googleapis.com **Attention:** take care of the `configs/gce.json` file, which contains the credentials to manage your Google Cloud account, including create and delete servers on this project. -There are more advanced arguments available for deploynment [using ansible](deploy-from-ansible.md). +There are more advanced arguments available for deployment [using ansible](deploy-from-ansible.md). diff --git a/docs/cloud-hetzner.md b/docs/cloud-hetzner.md index f0cb28b..1428e29 100644 --- a/docs/cloud-hetzner.md +++ b/docs/cloud-hetzner.md @@ -1,3 +1,3 @@ ## API Token -Sign in into the [Hetzner Cloud Console](https://console.hetzner.cloud/) choose a project, go to `Access` → `Tokens`, and create a new token. Make sure to copy the token because it won’t be shown to you again. A token is bound to a project, to interact with the API of another project you have to create a new token inside the project. +Sign in into the [Hetzner Cloud Console](https://console.hetzner.cloud/) choose a project, go to `Security` → `API Tokens`, and `Generate API Token` with `Read & Write` access. Make sure to copy the token because it won’t be shown to you again. A token is bound to a project. To interact with the API of another project you have to create a new token inside the project. diff --git a/docs/cloud-linode.md b/docs/cloud-linode.md new file mode 100644 index 0000000..3bbb6b5 --- /dev/null +++ b/docs/cloud-linode.md @@ -0,0 +1,9 @@ +## API Token + +Sign into the Linode Manager and go to the +[tokens management page](https://cloud.linode.com/profile/tokens). + +Click `Add a Personal Access Token`. Label your new token and select *at least* the +`Linodes` read/write permission and `StackScripts` read/write permission. +Press `Submit` and make sure to copy the displayed token +as it won't be shown again. 
diff --git a/docs/cloud-scaleway.md b/docs/cloud-scaleway.md index 7e6a02a..011469c 100644 --- a/docs/cloud-scaleway.md +++ b/docs/cloud-scaleway.md @@ -1,9 +1,10 @@ ### Configuration file Algo requires an API key from your Scaleway account to create a server. -The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/account/credentials](https://console.scaleway.com/account/credentials), and then selecting "Generate new token" on the right side of the box labeled "API Tokens". +The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/project/credentials](https://console.scaleway.com/project/credentials), and then selecting "Generate new API key" on the right side of the box labeled "API Keys". +You'll be asked to specify a purpose for your API key before it is created. You will then be presented with an "Access key" and a "Secret key". -Enter this token when Algo prompts you for the `auth token`. +Enter the "Secret key" when Algo prompts you for the `auth token`. You won't need the "Access key". This information will be passed as the `algo_scaleway_token` variable when asked for in the Algo prompt.
Your organization ID is also on this page: https://console.scaleway.com/account/credentials diff --git a/docs/deploy-from-ansible.md b/docs/deploy-from-ansible.md index ffc5217..f7d6b96 100644 --- a/docs/deploy-from-ansible.md +++ b/docs/deploy-from-ansible.md @@ -51,23 +51,24 @@ Cloud roles: - role: cloud-openstack, [provider: openstack](#openstack) - role: cloud-cloudstack, [provider: cloudstack](#cloudstack) - role: cloud-hetzner, [provider: hetzner](#hetzner) +- role: cloud-linode, [provider: linode](#linode) Server roles: - role: strongswan - * Installs [strongSwan](https://www.strongswan.org/) - * Enables AppArmor, limits CPU and memory access, and drops user privileges - * Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user - * Bundles the appropriate certificates into Apple mobileconfig profiles for each user + - Installs [strongSwan](https://www.strongswan.org/) + - Enables AppArmor, limits CPU and memory access, and drops user privileges + - Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user + - Bundles the appropriate certificates into Apple mobileconfig profiles for each user - role: dns_adblocking - * Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false` - * Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations + - Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false` + - Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations - role: 
ssh_tunneling - * Adds a restricted `algo` group with no shell access and limited SSH forwarding options - * Creates one limited, local account and an SSH public key for each user + - Adds a restricted `algo` group with no shell access and limited SSH forwarding options + - Creates one limited, local account and an SSH public key for each user - role: wireguard - * Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades - * Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients + - Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades + - Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables: @@ -95,7 +96,7 @@ Required variables: - do_token - region -Possible options can be gathered calling to https://api.digitalocean.com/v2/regions +Possible options can be gathered calling to ### Amazon EC2 @@ -109,9 +110,26 @@ Possible options can be gathered via cli `aws ec2 describe-regions` Additional variables: -- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false) +- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: true) +- [size](https://aws.amazon.com/ec2/instance-types/) - EC2 instance type. String (Default: t2.micro) +- [image](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html) - AMI `describe-images` search parameters to find the OS for the hosted image. Each OS and architecture has a unique AMI-ID. The OS owner, for example [Ubuntu](https://cloud-images.ubuntu.com/locator/ec2/), updates these images often. 
If parameters below result in multiple results, the most recent AMI-ID is chosen -#### Minimum required IAM permissions for deployment: + ``` + # Example of equivalent cli command + aws ec2 describe-images --owners "099720109477" --filters "Name=architecture,Values=arm64" "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-jammy-22.04*" + ``` + + - [owners] - The operating system owner id. Default is [Canonical](https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29) (Default: 099720109477) + - [arch] - The architecture (Default: x86_64, Optional: arm64) + - [name] - The wildcard string to filter available ami names. Algo appends this name with the string "-\*64-server-\*", and prepends with "ubuntu/images/hvm-ssd/" (Default: Ubuntu latest LTS) +- [instance_market_type](https://aws.amazon.com/ec2/pricing/) - Two pricing models are supported: on-demand and spot. String (Default: on-demand) + - If using spot instance types, one additional IAM permission along with the below minimum is required for deployment: + + ``` + "ec2:CreateLaunchTemplate" + ``` + +#### Minimum required IAM permissions for deployment ``` { @@ -149,14 +167,18 @@ Additional variables: "Sid": "CloudFormationEC2Access", "Effect": "Allow", "Action": [ + "ec2:DescribeRegions", "ec2:CreateInternetGateway", "ec2:DescribeVpcs", "ec2:CreateVpc", "ec2:DescribeInternetGateways", "ec2:ModifyVpcAttribute", - "ec2:createTags", + "ec2:CreateTags", "ec2:CreateSubnet", - "ec2:Associate*", + "ec2:AssociateVpcCidrBlock", + "ec2:AssociateSubnetCidrBlock", + "ec2:AssociateRouteTable", + "ec2:AssociateAddress", "ec2:CreateRouteTable", "ec2:AttachInternetGateway", "ec2:DescribeRouteTables", @@ -213,7 +235,7 @@ Required variables: Possible options can be gathered via cli `aws lightsail get-regions` -#### Minimum required IAM permissions for deployment: +#### Minimum required IAM permissions for deployment ``` { @@ -226,7 +248,27 @@ Possible options can be gathered via 
cli `aws lightsail get-regions` "lightsail:GetRegions", "lightsail:GetInstance", "lightsail:CreateInstances", - "lightsail:OpenInstancePublicPorts" + "lightsail:DisableAddOn", + "lightsail:PutInstancePublicPorts", + "lightsail:StartInstance", + "lightsail:TagResource", + "lightsail:GetStaticIp", + "lightsail:AllocateStaticIp", + "lightsail:AttachStaticIp" + ], + "Resource": [ + "*" + ] + }, + { + "Sid": "DeployCloudFormationStack", + "Effect": "Allow", + "Action": [ + "cloudformation:CreateStack", + "cloudformation:UpdateStack", + "cloudformation:DescribeStacks", + "cloudformation:DescribeStackEvents", + "cloudformation:ListStackResources" ], "Resource": [ "*" @@ -264,6 +306,13 @@ Required variables: - hcloud_token: Your [API token](https://trailofbits.github.io/algo/cloud-hetzner.html#api-token) - can also be defined in the environment as HCLOUD_TOKEN - region: e.g. `nbg1` +### Linode + +Required variables: + +- linode_token: Your [API token](https://trailofbits.github.io/algo/cloud-linode.html#api-token) - can also be defined in the environment as LINODE_TOKEN +- region: e.g. `us-east` + ### Update users Playbook: diff --git a/docs/deploy-from-cloudshell.md b/docs/deploy-from-cloudshell.md index f0f0fed..2e75e91 100644 --- a/docs/deploy-from-cloudshell.md +++ b/docs/deploy-from-cloudshell.md @@ -1,4 +1,5 @@ # Deploy from Google Cloud Shell +**IMPORTANT NOTE: As of 2021-12-14 Algo requires Python 3.8, but Google Cloud Shell only provides Python 3.7.3. The instructions below will not work until Google updates Cloud Shell to have at least Python 3.8.** If you want to try Algo but don't wish to install the software on your own system you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server. 
diff --git a/docs/deploy-from-docker.md b/docs/deploy-from-docker.md index 34f7bb2..523ab9f 100644 --- a/docs/deploy-from-docker.md +++ b/docs/deploy-from-docker.md @@ -13,28 +13,36 @@ While it is not possible to run your Algo server from within a Docker container, 2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`) 3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`) 4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`): - - From Windows: + +- From Windows: + ```powershell C:\Users\trailofbits> docker run --cap-drop=all -it \ -v C:\Users\trailofbits\Documents\VPNs:/data \ - trailofbits/algo:latest + ghcr.io/trailofbits/algo:latest ``` - - From Linux: + +- From Linux: + ```bash $ docker run --cap-drop=all -it \ -v /home/trailofbits/Documents/VPNs:/data \ - trailofbits/algo:latest + ghcr.io/trailofbits/algo:latest ``` + 5. When it exits, you'll be left with a fully populated `configs` directory, containing all appropriate configuration data for your clients, and for future server management ### Providing Additional Files + If you need to provide additional files -- like authorization files for Google Cloud Project -- you can simply specify an additional `-v` parameter, and provide the appropriate path when prompted by `algo`. For example, you can specify `-v C:\Users\trailofbits\Documents\VPNs\gce_auth.json:/algo/gce_auth.json`, making the local path to your credentials JSON file `/algo/gce_auth.json`. ### Scripted deployment + Ansible variables (see [Deployment from Ansible](deploy-from-ansible.md)) can be passed via `ALGO_ARGS` environment variable. _The leading `-e` (or `--extra-vars`) is required_, e.g. 
+ ```bash $ ALGO_ARGS="-e provider=digitalocean @@ -50,7 +58,7 @@ $ ALGO_ARGS="-e $ docker run --cap-drop=all -it \ -e "ALGO_ARGS=$ALGO_ARGS" \ -v /home/trailofbits/Documents/VPNs:/data \ - trailofbits/algo:latest + ghcr.io/trailofbits/algo:latest ``` ## Managing an Algo Server with Docker @@ -58,11 +66,12 @@ $ docker run --cap-drop=all -it \ Even though the container itself is transient, because you've persisted the configuration data, you can use the same Docker image to manage your Algo server. This is done by setting the environment variable `ALGO_ARGS`. If you want to use Algo to update the users on an existing server, specify `-e "ALGO_ARGS=update-users"` in your `docker run` command: + ```powershell $ docker run --cap-drop=all -it \ -e "ALGO_ARGS=update-users" \ -v C:\Users\trailofbits\Documents\VPNs:/data \ - trailofbits/algo:latest + ghcr.io/trailofbits/algo:latest ``` ## GNU Makefile for Docker diff --git a/docs/deploy-from-macos.md b/docs/deploy-from-macos.md index ae6d757..ba2855b 100644 --- a/docs/deploy-from-macos.md +++ b/docs/deploy-from-macos.md @@ -2,25 +2,29 @@ While you can't turn a macOS system in an AlgoVPN, you can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider. -Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS does not include a version of Python 3 that you can use with Algo. (It does include an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore.) - -You'll need to install Python 3 before you can run Algo. Python 3 is available from several different packagers, three of which are listed below. +Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS includes an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore. ## macOS 10.15 Catalina -Catalina comes with `/usr/bin/python3` installed. 
This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Developer Command Line Tools the first time you run them. Having `git` installed can be useful but whether or not you choose to install the Command Line Tools you **cannot** use this version of Python 3 with Algo at this time. Instead install one of the versions below. +Catalina comes with Python 3 installed as `/usr/bin/python3`. This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Command Line Developer Tools package the first time you run them. This is the easiest way to install Python 3 on Catalina. + +Note that Python 3 from Command Line Developer Tools prior to the release for Xcode 11.5 on 2020-05-20 might not work with Algo. If Software Update does not offer to update an older version of the tools you can download a newer version from [here](https://developer.apple.com/download/more/) (Apple ID login required). + +## macOS prior to 10.15 Catalina + +You'll need to install Python 3 before you can run Algo. Python 3 is available from different packagers, two of which are listed below. -## Ansible and SSL Validation +### Ansible and SSL Validation Ansible validates SSL network connections using OpenSSL but macOS includes LibreSSL which behaves differently. Therefore each version of Python below includes or depends on its own copy of OpenSSL. -OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly, and that you're not inadvertently running Catalina's `/usr/bin/python3`. +OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. 
If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly. -## Install Python 3 +### Choose a packager and install Python 3 Choose one of the packagers below as your source for Python 3. Avoid installing versions from multiple packagers on the same Mac as you may encounter conflicts. In particular they might fight over creating symbolic links in `/usr/local/bin`. -### Option 1: Install using the Homebrew package manager +#### Option 1: Install using the Homebrew package manager If you're comfortable using the command line in Terminal the [Homebrew](https://brew.sh) project is a great source of software for macOS. @@ -28,22 +32,22 @@ First install Homebrew using the instructions on the [Homebrew](https://brew.sh) The install command below takes care of initializing the CA certificate store. -#### Installation +##### Installation ``` brew install python3 ``` After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/usr/local/bin/python3`. -#### Removal +##### Removal ``` brew uninstall python3 ``` -### Option 2: Install a package from Python.org +#### Option 2: Install the package from Python.org -If you don't want to install a package manager you can download a Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/). +If you don't want to install a package manager you can download the Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/). -#### Installation +##### Installation Download the most recent version of Python and install it like any other macOS package. Then initialize the CA certificate store from Finder by double-clicking on the file `Install Certificates.command` found in the `/Applications/Python 3.8` folder. 
@@ -51,7 +55,7 @@ When you double-click on `Install Certificates.command` a new Terminal window wi After installation open a new tab or window in Terminal and verify that the command `which python3` returns either `/usr/local/bin/python3` or `/Library/Frameworks/Python.framework/Versions/3.8/bin/python3`. -#### Removal +##### Removal Unfortunately the python.org package does not include an uninstaller and removing it requires several steps: @@ -60,24 +64,3 @@ Unfortunately the python.org package does not include an uninstaller and removin 3. In Terminal, undo the changes to your `PATH` by running: ```mv ~/.bash_profile.pysave ~/.bash_profile``` 4. In Terminal, remove the dozen or so symbolic links the package created in `/usr/local/bin`. Or just leave them because installing another version of Python will overwrite most of them. - -### Option 3: Install using the Macports package manager - -[Macports](https://www.macports.org) is another command line based package manager like Homebrew. Most users will find Macports far more complex than Homebrew, but developers might find Macports more flexible. If you search for "Macports vs. Homebrew" you will find many opinions. - -First install Macports per the [instructions](https://www.macports.org/install.php). - -In addition to installing Python you'll need to install the package containing the CA certificates. - -#### Installation -``` -sudo port install python38 -sudo port install curl-ca-bundle -``` -After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/opt/local/bin/python3`. 
- -#### Removal -``` -sudo port uninstall python38 -sudo port uninstall curl-ca-bundle -``` diff --git a/docs/deploy-from-redhat-centos6.md b/docs/deploy-from-redhat-centos6.md deleted file mode 100644 index a0e6fb9..0000000 --- a/docs/deploy-from-redhat-centos6.md +++ /dev/null @@ -1,97 +0,0 @@ -# RedHat/CentOS 6.x pre-installation requirements - -Many people prefer RedHat or CentOS 6 (or similar variants like Amazon Linux) for to their stability and lack of systemd. Unfortunately, there are a number of dated libraries, notably Python 2.6, that prevent Algo from running without errors. This script will prepare a RedHat, CentOS, or similar VM to deploy to Algo cloud instances. - -## Step 1: Prep for RH/CentOS 6.8/Amazon - -```shell -yum -y update -yum -y install epel-release -``` - -Enable any kernel updates: - -```shell -reboot -``` - -## Step 2: Install Ansible and launch Algo - -RedHat/CentOS 6.x uses Python 2.6 by default, which is explicitly deprecated and produces many warnings and errors, so we must install a safe, non-invasive 3.6 tool set which has to be expressly enabled (and will not survive login sessions and reboots): - -- Install the Software Collections Library (to enable Python 3.6) -```shell -yum -y install centos-release-SCL -yum -y install \ - openssl-devel \ - libffi-devel \ - automake \ - gcc \ - gcc-c++ \ - kernel-devel \ - rh-python36-python \ - rh-python36-python-devel \ - rh-python36-python-setuptools \ - rh-python36-python-pip \ - rh-python36-python-virtualenv \ - rh-python36-python-crypto \ - rh-python36-PyYAML \ - libselinux-python \ - python-crypto \ - wget \ - unzip \ - nano -``` - -- 3.6 will not be used until explicitly enabled, per login session. Enable 3.6 default for this session (needs re-run between logins & reboots) -``` -scl enable rh-python36 bash -``` - -- We're now defaulted to 3.6. 
Upgrade required components -``` -python3 -m pip install -U pip virtualenv pycrypto setuptools -``` - -- Download and uzip Algo -``` -wget https://github.com/trailofbits/algo/archive/master.zip -unzip master.zip -cd algo-master || echo "No Algo directory found" -``` - -- Set up a virtualenv and install the local Algo dependencies (must be run from algo-master) -``` -python3 -m virtualenv --python="$(command -v python3)" .env -source .env/bin/activate -python3 -m pip install -U pip virtualenv -python3 -m pip install -r requirements.txt -``` - -- Edit the userlist and any other settings you desire -``` -nano config.cfg -``` - -- Now you can run the Algo installer! -``` -./algo -``` - -## Post-install macOS - -1. Copy `./configs/*mobileconfig` to your local Mac - -2. Install the VPN profile on your Mac (10.10+ required) - - ```shell - /usr/bin/profiles -I -F ./x.x.x.x_NAME.mobileconfig - ``` - -3. To remove: - - ```shell - /usr/bin/profiles -D -F ./x.x.x.x_NAME.mobileconfig - ``` - -The VPN connection will now appear under Networks (which can be pinned to the top menu bar if preferred) diff --git a/docs/deploy-from-windows.md b/docs/deploy-from-windows.md index a9806b2..ac6ba49 100644 --- a/docs/deploy-from-windows.md +++ b/docs/deploy-from-windows.md @@ -21,7 +21,7 @@ Wait a minute for Windows to install a few things in the background (it will eve 2. Click on 'Turn Windows features on or off' 3. Scroll down and check 'Windows Subsystem for Linux', and then click OK. 4. The subsystem will be installed, then Windows will require a restart. -5. Restart Windows and then [install Ubuntu 18.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-1804-lts/9n9tngvndl3q) (at this time Ubuntu 20.04 LTS does not work with Algo when running under WSL). +5. Restart Windows and then install [Ubuntu 20.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-2004-lts/9n6svws3rx71). 6. Run Ubuntu from the Start menu. It will take a few minutes to install. 
It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows. ## Install Algo @@ -39,6 +39,32 @@ git clone https://github.com/trailofbits/algo cd algo ``` +## Post installation steps + +These steps are only needed if you cloned the Algo repository to a host machine disk (C:, D:, etc.). WSL mounts host system disks under the `/mnt` directory. + +### Allow git to change file metadata + +By default git cannot change file metadata (using chmod, for example) for files stored on host machine disks (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings). Allow it: + +1. Start Ubuntu Terminal. +2. Edit /etc/wsl.conf (create it if it doesn't exist). Add the following: +``` +[automount] +options = "metadata" +``` +3. Close all Ubuntu Terminals. +4. Run powershell. +5. Run `wsl --shutdown` in powershell. + +### Allow running Ansible in a world writable directory + +Ansible treats host machine directories as world writable and does not load .cfg files from them by default (https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir). To fix this, run the following inside the `algo` directory: + +```shell +chmod 744 . +``` + Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server! You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run: diff --git a/docs/deploy-to-ubuntu.md b/docs/deploy-to-ubuntu.md index 2734af5..8f11ae7 100644 --- a/docs/deploy-to-ubuntu.md +++ b/docs/deploy-to-ubuntu.md @@ -1,18 +1,25 @@ # Local Installation +**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided.
If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information. + +------ + +## Outbound VPN Server + You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier. To perform a local installation, install the Algo scripts following the normal installation instructions, then choose: + ``` -Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users) +Install to existing Ubuntu latest LTS server (for more advanced users) ``` + Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`). -# Road Warrior setup +## Inbound VPN Server (also called "Road Warrior" setup) Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so: + - Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router; - Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`; - If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. 
a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`). - -**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information. diff --git a/docs/deploy-to-unsupported-cloud.md b/docs/deploy-to-unsupported-cloud.md index 6e1a5f9..5c18a5b 100644 --- a/docs/deploy-to-unsupported-cloud.md +++ b/docs/deploy-to-unsupported-cloud.md @@ -2,7 +2,7 @@ Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support: -1. the base operating system image that Algo uses (Ubuntu 18.04 or 20.04), and +1. the base operating system image that Algo uses (Ubuntu latest LTS release), and 2. a minimum of certain kernel modules required for the strongSwan IPsec server. Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider. diff --git a/docs/faq.md b/docs/faq.md index 109093e..7ce81e8 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -17,7 +17,7 @@ ## Has Algo been audited? -No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://empireslacking.herokuapp.com). +No. This project is under active development. 
We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://slack.empirehacking.nyc). ## What's the current status of WireGuard? diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index f8a5f76..5d6d488 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -22,6 +22,8 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens * [Error: Failed to create symlinks for deploying to localhost](#error-failed-to-create-symlinks-for-deploying-to-localhost) * [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths) * [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password) + * [Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160](#old-networking-firewall-in-place) + * [Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "](#linode-error-unable-to-query-the-linode-api-saw-400-the-requested-distribution-is-not-supported-by-this-stackscript) + * [Connection Problems](#connection-problems) * [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites) * [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device) @@ -41,7 +43,7 @@ Look here if you have a problem running the installer to set up a new Algo serve ### Python version is not supported -The minimum Python version required to run Algo is 3.6. Most modern operation systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade.
See the official documentation for your OS, or manual download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md) +The minimum Python version required to run Algo is 3.8. Most modern operating systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manually download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md) ### Error: "You have not agreed to the Xcode license agreements" @@ -362,6 +364,32 @@ sudo chown $USER:$USER $HOME/.rnd Now, run Algo again. +### Old Networking Firewall In Place + +You may see the following output when attempting to run ./algo from your localhost: + +``` +TASK [Wait until SSH becomes ready...] ********************************************************************************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "elapsed": 321, "msg": "Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160"} +included: /home//algo/algo/playbooks/rescue.yml for localhost + +TASK [debug] ************************************************************************************************************************************************ +ok: [localhost] => { + "fail_hint": [ + "Sorry, but something went wrong!", + "Please check the troubleshooting guide.", + "https://trailofbits.github.io/algo/troubleshooting.html" + ] +} +``` + +If you see this error then one possible explanation is that you have a previous firewall configured in your cloud hosting provider which needs to be either updated or ideally removed. Removing this can often fix this issue. + +### Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; " + +StackScript is a custom deployment script that defines a set of configurations for a Linode instance (e.g.
which distribution, specs, etc.). if you used algo with default values in the past deployments, a stackscript that would've been created is 're-used' in the deployment process (in fact, go see 'create Linodes' and under 'StackScripts' tab). Thus, there's a little chance that your deployment process will generate this 'unsupported stackscript' error due to a pre-existing StackScript that doesn't support a particular configuration setting or value due to an 'old' stackscript. The quickest solution is just to change the name of your deployment from the default value of 'algo' (or any other name that you've used before, again see the dashboard) and re-run the deployment. + + ## Connection Problems Look here if you deployed an Algo server but now have a problem connecting to it with a client. @@ -502,4 +530,4 @@ If your router runs [pfSense](https://www.pfsense.org) and a single IPsec client ## I have a problem not covered here -If you have an issue that you cannot solve with the guidance here, [join our Gitter](https://gitter.im/trailofbits/algo) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new). +If you have an issue that you cannot solve with the guidance here, [create a new discussion](https://github.com/trailofbits/algo/discussions) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new). 
diff --git a/files/cloud-init/base.sh b/files/cloud-init/base.sh index 414a222..753441d 100644 --- a/files/cloud-init/base.sh +++ b/files/cloud-init/base.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/bin/sh set -eux +# shellcheck disable=SC2230 which sudo || until \ apt-get update -y && \ apt-get install sudo -yf --install-suggests; do @@ -15,9 +16,12 @@ cat </etc/ssh/sshd_config {{ lookup('template', 'files/cloud-init/sshd_config') }} EOF -test -d /home/algo/.ssh || (umask 077 && sudo -u algo mkdir -p /home/algo/.ssh/) -echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (umask 177 && sudo -u algo tee /home/algo/.ssh/authorized_keys) +test -d /home/algo/.ssh || sudo -u algo mkdir -m 0700 /home/algo/.ssh +echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (sudo -u algo tee /home/algo/.ssh/authorized_keys && chmod 0600 /home/algo/.ssh/authorized_keys) +ufw --force reset + +# shellcheck disable=SC2015 dpkg -l sshguard && until apt-get remove -y --purge sshguard; do sleep 3 done || true diff --git a/files/cloud-init/base.yml b/files/cloud-init/base.yml index 5cc03fd..8a14b5f 100644 --- a/files/cloud-init/base.yml +++ b/files/cloud-init/base.yml @@ -25,5 +25,6 @@ write_files: runcmd: - set -x + - ufw --force reset - sudo apt-get remove -y --purge sshguard || true - systemctl restart sshd.service diff --git a/input.yml b/input.yml index 1b06dec..64b4805 100644 --- a/input.yml +++ b/input.yml @@ -18,126 +18,126 @@ - { name: Google Compute Engine, alias: gce } - { name: Hetzner Cloud, alias: hetzner } - { name: Vultr, alias: vultr } - - { name: Scaleway, alias: scaleway} + - { name: Scaleway, alias: scaleway } - { name: OpenStack (DreamCompute optimised), alias: openstack } - { name: CloudStack (Exoscale optimised), alias: cloudstack } - - { name: "Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users)", alias: local } + - { name: Linode, alias: linode } + - { name: Install to existing Ubuntu latest LTS server (for more advanced users), alias: local } 
vars_files: - config.cfg tasks: - block: - - name: Cloud prompt - pause: - prompt: | - What provider would you like to use? - {% for p in providers_map %} - {{ loop.index }}. {{ p['name'] }} - {% endfor %} + - name: Cloud prompt + pause: + prompt: | + What provider would you like to use? + {% for p in providers_map %} + {{ loop.index }}. {{ p['name'] }} + {% endfor %} - Enter the number of your desired provider - register: _algo_provider - when: provider is undefined + Enter the number of your desired provider + register: _algo_provider + when: provider is undefined - - name: Set facts based on the input - set_fact: - algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}" + - name: Set facts based on the input + set_fact: + algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}" - - name: VPN server name prompt - pause: - prompt: | - Name the vpn server - [algo] - register: _algo_server_name - when: - - server_name is undefined - - algo_provider != "local" + - name: VPN server name prompt + pause: + prompt: | + Name the vpn server + [algo] + register: _algo_server_name + when: + - server_name is undefined + - algo_provider != "local" - - name: Cellular On Demand prompt - pause: - prompt: | - Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks? - [y/N] - register: _ondemand_cellular - when: ondemand_cellular is undefined + - name: Cellular On Demand prompt + pause: + prompt: | + Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks? + [y/N] + register: _ondemand_cellular + when: ondemand_cellular is undefined - - name: Wi-Fi On Demand prompt - pause: - prompt: | - Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi? 
- [y/N] - register: _ondemand_wifi - when: ondemand_wifi is undefined + - name: Wi-Fi On Demand prompt + pause: + prompt: | + Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi? + [y/N] + register: _ondemand_wifi + when: ondemand_wifi is undefined - - name: Trusted Wi-Fi networks prompt - pause: - prompt: | - List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand" - (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi) - register: _ondemand_wifi_exclude - when: - - ondemand_wifi_exclude is undefined - - (ondemand_wifi|default(false)|bool) or - (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false)) + - name: Trusted Wi-Fi networks prompt + pause: + prompt: | + List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand" + (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi) + register: _ondemand_wifi_exclude + when: + - ondemand_wifi_exclude is undefined + - (ondemand_wifi|default(false)|bool) or (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false)) - - name: Retain the PKI prompt - pause: - prompt: | - Do you want to retain the keys (PKI)? (required to add users in the future, but less secure) - [y/N] - register: _store_pki - when: - - store_pki is undefined - - ipsec_enabled + - name: Retain the PKI prompt + pause: + prompt: | + Do you want to retain the keys (PKI)? (required to add users in the future, but less secure) + [y/N] + register: _store_pki + when: + - store_pki is undefined + - ipsec_enabled - - name: DNS adblocking prompt - pause: - prompt: | - Do you want to enable DNS ad blocking on this VPN server? - [y/N] - register: _dns_adblocking - when: dns_adblocking is undefined + - name: DNS adblocking prompt + pause: + prompt: | + Do you want to enable DNS ad blocking on this VPN server? 
+ [y/N] + register: _dns_adblocking + when: dns_adblocking is undefined - - name: SSH tunneling prompt - pause: - prompt: | - Do you want each user to have their own account for SSH tunneling? - [y/N] - register: _ssh_tunneling - when: ssh_tunneling is undefined + - name: SSH tunneling prompt + pause: + prompt: | + Do you want each user to have their own account for SSH tunneling? + [y/N] + register: _ssh_tunneling + when: ssh_tunneling is undefined - - name: Set facts based on the input - set_fact: - algo_server_name: >- - {% if server_name is defined %}{% set _server = server_name %} - {%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%} - {%- set _server = _algo_server_name.user_input -%} - {%- else %}{% set _server = defaults['server_name'] %}{% endif -%} - {{ _server | regex_replace('(?!\.)(\W|_)', '-') }} - algo_ondemand_cellular: >- - {% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }} - {%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }} - {%- else %}false{% endif %} - algo_ondemand_wifi: >- - {% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }} - {%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }} - {%- else %}false{% endif %} - algo_ondemand_wifi_exclude: >- - {% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }} - {%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%} - {{ _ondemand_wifi_exclude.user_input | b64encode }} - {%- else %}{{ '_null' | b64encode }}{% endif %} - algo_dns_adblocking: >- - {% if dns_adblocking is defined %}{{ dns_adblocking | bool }} - {%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }} - {%- else %}false{% endif %} - algo_ssh_tunneling: 
>- - {% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }} - {%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }} - {%- else %}false{% endif %} - algo_store_pki: >- - {% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }} - {%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }} - {%- else %}false{% endif %}{% endif %} + - name: Set facts based on the input + set_fact: + algo_server_name: >- + {% if server_name is defined %}{% set _server = server_name %} + {%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%} + {%- set _server = _algo_server_name.user_input -%} + {%- else %}{% set _server = defaults['server_name'] %}{% endif -%} + {{ _server | regex_replace('(?!\.)(\W|_)', '-') }} + algo_ondemand_cellular: >- + {% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }} + {%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }} + {%- else %}false{% endif %} + algo_ondemand_wifi: >- + {% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }} + {%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }} + {%- else %}false{% endif %} + algo_ondemand_wifi_exclude: >- + {% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }} + {%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%} + {{ _ondemand_wifi_exclude.user_input | b64encode }} + {%- else %}{{ '_null' | b64encode }}{% endif %} + algo_dns_adblocking: >- + {% if dns_adblocking is defined %}{{ dns_adblocking | bool }} + {%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }} + {%- else 
%}false{% endif %} + algo_ssh_tunneling: >- + {% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }} + {%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }} + {%- else %}false{% endif %} + algo_store_pki: >- + {% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }} + {%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }} + {%- else %}false{% endif %}{% endif %} rescue: - include_tasks: playbooks/rescue.yml diff --git a/install.sh b/install.sh index 7ebf617..9b6ff41 100644 --- a/install.sh +++ b/install.sh @@ -22,16 +22,7 @@ installRequirements() { export DEBIAN_FRONTEND=noninteractive apt-get update apt-get install \ - software-properties-common \ - git \ - build-essential \ - libssl-dev \ - libffi-dev \ - python3-dev \ - python3-pip \ - python3-setuptools \ python3-virtualenv \ - bind9-host \ jq -y } @@ -39,9 +30,9 @@ getAlgo() { [ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo cd algo - python3 -m virtualenv --python="$(command -v python3)" .venv + python3 -m virtualenv --python="$(command -v python3)" .env # shellcheck source=/dev/null - . .venv/bin/activate + . .env/bin/activate python3 -m pip install -U pip virtualenv python3 -m pip install -r requirements.txt } @@ -50,7 +41,7 @@ publicIpFromInterface() { echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint." 
DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')" ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b') - export ENDPOINT=$ENDPOINT + export ENDPOINT="${ENDPOINT}" echo "Using ${ENDPOINT} as the endpoint" } @@ -66,7 +57,7 @@ publicIpFromMetadata() { fi if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then - export ENDPOINT=$ENDPOINT + export ENDPOINT="${ENDPOINT}" echo "Using ${ENDPOINT} as the endpoint" else publicIpFromInterface @@ -78,7 +69,7 @@ deployAlgo() { cd /opt/algo # shellcheck source=/dev/null - . .venv/bin/activate + . .env/bin/activate export HOME=/root export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp diff --git a/library/cloudstack_zones.py b/library/cloudstack_zones.py deleted file mode 100644 index 3e5d9a0..0000000 --- a/library/cloudstack_zones.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.cloudstack import ( - AnsibleCloudStack, - cs_argument_spec, - cs_required_together, -) - -DOCUMENTATION = ''' ---- -module: cloudstack_zones -short_description: List zones on Apache CloudStack based clouds. -description: - - List zones. -version_added: '0.1' -author: Julien Bachmann (@0xmilkmix) -extends_documentation_fragment: cloudstack -''' - -EXAMPLES = ''' -- name: List zones - cloudstack_zones: - register: _cs_zones -''' - -RETURN = ''' ---- -zone: - description: List of zones. 
- returned: success - type: list - sample: - [ - { - "allocationstate": "Enabled", - "dhcpprovider": "VirtualRouter", - "id": "", - "localstorageenabled": true, - "name": "ch-gva-2", - "networktype": "Basic", - "securitygroupsenabled": true, - "tags": [], - "zonetoken": "token" - }, - { - "allocationstate": "Enabled", - "dhcpprovider": "VirtualRouter", - "id": "", - "localstorageenabled": true, - "name": "ch-dk-2", - "networktype": "Basic", - "securitygroupsenabled": true, - "tags": [], - "zonetoken": "token" - }, - { - "allocationstate": "Enabled", - "dhcpprovider": "VirtualRouter", - "id": "", - "localstorageenabled": true, - "name": "at-vie-1", - "networktype": "Basic", - "securitygroupsenabled": true, - "tags": [], - "zonetoken": "token" - }, - { - "allocationstate": "Enabled", - "dhcpprovider": "VirtualRouter", - "id": "", - "localstorageenabled": true, - "name": "de-fra-1", - "networktype": "Basic", - "securitygroupsenabled": true, - "tags": [], - "zonetoken": "token" - } - ] -''' - -class AnsibleCloudStackZones(AnsibleCloudStack): - - def __init__(self, module): - super(AnsibleCloudStackZones, self).__init__(module) - self.zones = None - - def get_zones(self): - args = {} - if not self.zones: - zones = self.query_api('listZones', **args) - if zones: - self.zones = zones - return self.zones - -def main(): - module = AnsibleModule(argument_spec={}) - acs_zones = AnsibleCloudStackZones(module) - result = acs_zones.get_zones() - module.exit_json(**result) - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/library/lightsail.py b/library/lightsail.py deleted file mode 100644 index 99e49ac..0000000 --- a/library/lightsail.py +++ /dev/null @@ -1,551 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright: Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -ANSIBLE_METADATA = 
{'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' ---- -module: lightsail -short_description: Create or delete a virtual machine instance in AWS Lightsail -description: - - Creates or instances in AWS Lightsail and optionally wait for it to be 'running'. -version_added: "2.4" -author: "Nick Ball (@nickball)" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent', 'running', 'restarted', 'stopped'] - name: - description: - - Name of the instance - required: true - default : null - zone: - description: - - AWS availability zone in which to launch the instance. Required when state='present' - required: false - default: null - blueprint_id: - description: - - ID of the instance blueprint image. Required when state='present' - required: false - default: null - bundle_id: - description: - - Bundle of specification info for the instance. Required when state='present' - required: false - default: null - user_data: - description: - - Launch script that can configure the instance with additional data - required: false - default: null - key_pair_name: - description: - - Name of the key pair to use with the instance - required: false - default: null - wait: - description: - - Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned - default: "yes" - choices: [ "yes", "no" ] - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 300 - open_ports: - description: - - Adds public ports to an Amazon Lightsail instance. - default: null - suboptions: - from_port: - description: Begin of the range - required: true - default: null - to_port: - description: End of the range - required: true - default: null - protocol: - description: Accepted traffic protocol. 
- required: true - choices: - - udp - - tcp - - all - default: null -requirements: - - "python >= 2.6" - - boto3 - -extends_documentation_fragment: - - aws - - ec2 -''' - - -EXAMPLES = ''' -# Create a new Lightsail instance, register the instance details -- lightsail: - state: present - name: myinstance - region: us-east-1 - zone: us-east-1a - blueprint_id: ubuntu_16_04 - bundle_id: nano_1_0 - key_pair_name: id_rsa - user_data: " echo 'hello world' > /home/ubuntu/test.txt" - wait_timeout: 500 - open_ports: - - from_port: 4500 - to_port: 4500 - protocol: udp - - from_port: 500 - to_port: 500 - protocol: udp - register: my_instance - -- debug: - msg: "Name is {{ my_instance.instance.name }}" - -- debug: - msg: "IP is {{ my_instance.instance.publicIpAddress }}" - -# Delete an instance if present -- lightsail: - state: absent - region: us-east-1 - name: myinstance - -''' - -RETURN = ''' -changed: - description: if a snapshot has been modified/created - returned: always - type: bool - sample: - changed: true -instance: - description: instance data - returned: always - type: dict - sample: - arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87" - blueprint_id: "ubuntu_16_04" - blueprint_name: "Ubuntu" - bundle_id: "nano_1_0" - created_at: "2017-03-27T08:38:59.714000-04:00" - hardware: - cpu_count: 1 - ram_size_in_gb: 0.5 - is_static_ip: false - location: - availability_zone: "us-east-1a" - region_name: "us-east-1" - name: "my_instance" - networking: - monthly_transfer: - gb_per_month_allocated: 1024 - ports: - - access_direction: "inbound" - access_from: "Anywhere (0.0.0.0/0)" - access_type: "public" - common_name: "" - from_port: 80 - protocol: tcp - to_port: 80 - - access_direction: "inbound" - access_from: "Anywhere (0.0.0.0/0)" - access_type: "public" - common_name: "" - from_port: 22 - protocol: tcp - to_port: 22 - private_ip_address: "172.26.8.14" - public_ip_address: "34.207.152.202" - resource_type: "Instance" - 
ssh_key_name: "keypair" - state: - code: 16 - name: running - support_code: "588307843083/i-0997c97831ee21e33" - username: "ubuntu" -''' - -import time -import traceback - -try: - import botocore - HAS_BOTOCORE = True -except ImportError: - HAS_BOTOCORE = False - -try: - import boto3 -except ImportError: - # will be caught by imported HAS_BOTO3 - pass - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, - HAS_BOTO3, camel_dict_to_snake_dict) - - -def create_instance(module, client, instance_name): - """ - Create an instance - - module: Ansible module object - client: authenticated lightsail connection object - instance_name: name of instance to delete - - Returns a dictionary of instance information - about the new instance. - - """ - - changed = False - - # Check if instance already exists - inst = None - try: - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'NotFoundException': - module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) - - zone = module.params.get('zone') - blueprint_id = module.params.get('blueprint_id') - bundle_id = module.params.get('bundle_id') - user_data = module.params.get('user_data') - user_data = '' if user_data is None else user_data - - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - wait_max = time.time() + wait_timeout - - if module.params.get('key_pair_name'): - key_pair_name = module.params.get('key_pair_name') - else: - key_pair_name = '' - - if module.params.get('open_ports'): - open_ports = module.params.get('open_ports') - else: - open_ports = '[]' - - resp = None - if inst is None: - try: - resp = client.create_instances( - instanceNames=[ - instance_name - ], - availabilityZone=zone, - blueprintId=blueprint_id, - bundleId=bundle_id, - userData=user_data, - 
keyPairName=key_pair_name, - ) - resp = resp['operations'][0] - except botocore.exceptions.ClientError as e: - module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e)) - - inst = _find_instance_info(client, instance_name) - - # Wait for instance to become running - if wait: - while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"): - try: - time.sleep(2) - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": - module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name), - exception=traceback.format_exc()) - elif e.response['Error']['Code'] == "RequestExpired": - module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc()) - time.sleep(1) - - # Timed out - if wait and not changed and wait_max <= time.time(): - module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime()) - - # Attempt to open ports - if open_ports: - if inst is not None: - try: - for o in open_ports: - resp = client.open_instance_public_ports( - instanceName=instance_name, - portInfo={ - 'fromPort': o['from_port'], - 'toPort': o['to_port'], - 'protocol': o['protocol'] - } - ) - except botocore.exceptions.ClientError as e: - module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e)) - - changed = True - - return (changed, inst) - - -def delete_instance(module, client, instance_name): - """ - Terminates an instance - - module: Ansible module object - client: authenticated lightsail connection object - instance_name: name of instance to delete - - Returns a dictionary of instance information - about the instance deleted (pre-deletion). - - If the instance to be deleted is running - "changed" will be set to False. 
- - """ - - # It looks like deleting removes the instance immediately, nothing to wait for - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - wait_max = time.time() + wait_timeout - - changed = False - - inst = None - try: - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'NotFoundException': - module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) - - # Wait for instance to exit transition state before deleting - if wait: - while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'): - try: - time.sleep(5) - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": - module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name), - exception=traceback.format_exc()) - elif e.response['Error']['Code'] == "RequestExpired": - module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc()) - # sleep and retry - time.sleep(10) - - # Attempt to delete - if inst is not None: - while not changed and ((wait and wait_max > time.time()) or (not wait)): - try: - client.delete_instance(instanceName=instance_name) - changed = True - except botocore.exceptions.ClientError as e: - module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e)) - - # Timed out - if wait and not changed and wait_max <= time.time(): - module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime()) - - return (changed, inst) - - -def restart_instance(module, client, instance_name): - """ - Reboot an existing instance - - module: Ansible module object - client: authenticated lightsail connection object - instance_name: name of 
instance to reboot - - Returns a dictionary of instance information - about the restarted instance - - If the instance was not able to reboot, - "changed" will be set to False. - - Wait will not apply here as this is an OS-level operation - """ - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - wait_max = time.time() + wait_timeout - - changed = False - - inst = None - try: - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'NotFoundException': - module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) - - # Wait for instance to exit transition state before state change - if wait: - while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'): - try: - time.sleep(5) - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": - module.fail_json(msg="Failed to restart instance {0}. 
Check that you have permissions to perform the operation.".format(instance_name), - exception=traceback.format_exc()) - elif e.response['Error']['Code'] == "RequestExpired": - module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc()) - time.sleep(3) - - # send reboot - if inst is not None: - try: - client.reboot_instance(instanceName=instance_name) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'NotFoundException': - module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e)) - changed = True - - return (changed, inst) - - -def startstop_instance(module, client, instance_name, state): - """ - Starts or stops an existing instance - - module: Ansible module object - client: authenticated lightsail connection object - instance_name: name of instance to start/stop - state: Target state ("running" or "stopped") - - Returns a dictionary of instance information - about the instance started/stopped - - If the instance was not able to state change, - "changed" will be set to False. - - """ - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - wait_max = time.time() + wait_timeout - - changed = False - - inst = None - try: - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['Error']['Code'] != 'NotFoundException': - module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e)) - - # Wait for instance to exit transition state before state change - if wait: - while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'): - try: - time.sleep(5) - inst = _find_instance_info(client, instance_name) - except botocore.exceptions.ClientError as e: - if e.response['ResponseMetadata']['HTTPStatusCode'] == "403": - module.fail_json(msg="Failed to start/stop instance {0}. 
Check that you have permissions to perform the operation".format(instance_name), - exception=traceback.format_exc()) - elif e.response['Error']['Code'] == "RequestExpired": - module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc()) - time.sleep(1) - - # Try state change - if inst is not None and inst['state']['name'] != state: - try: - if state == 'running': - client.start_instance(instanceName=instance_name) - else: - client.stop_instance(instanceName=instance_name) - except botocore.exceptions.ClientError as e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e)) - changed = True - # Grab current instance info - inst = _find_instance_info(client, instance_name) - - return (changed, inst) - - -def core(module): - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if not region: - module.fail_json(msg='region must be specified') - - client = None - try: - client = boto3_conn(module, conn_type='client', resource='lightsail', - region=region, endpoint=ec2_url, **aws_connect_kwargs) - except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e: - module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc()) - - changed = False - state = module.params['state'] - name = module.params['name'] - - if state == 'absent': - changed, instance_dict = delete_instance(module, client, name) - elif state in ('running', 'stopped'): - changed, instance_dict = startstop_instance(module, client, name, state) - elif state == 'restarted': - changed, instance_dict = restart_instance(module, client, name) - elif state == 'present': - changed, instance_dict = create_instance(module, client, name) - - module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict)) - - -def _find_instance_info(client, instance_name): - ''' handle exceptions 
where this function is called ''' - inst = None - try: - inst = client.get_instance(instanceName=instance_name) - except botocore.exceptions.ClientError as e: - raise - return inst['instance'] - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - name=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']), - zone=dict(type='str'), - blueprint_id=dict(type='str'), - bundle_id=dict(type='str'), - key_pair_name=dict(type='str'), - user_data=dict(type='str'), - wait=dict(type='bool', default=True), - wait_timeout=dict(default=300), - open_ports=dict(type='list') - )) - - module = AnsibleModule(argument_spec=argument_spec) - - if not HAS_BOTO3: - module.fail_json(msg='Python module "boto3" is missing, please install it') - - if not HAS_BOTOCORE: - module.fail_json(msg='Python module "botocore" is missing, please install it') - - try: - core(module) - except (botocore.exceptions.ClientError, Exception) as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/library/linode_stackscript_v4.py b/library/linode_stackscript_v4.py new file mode 100644 index 0000000..4e8ddc0 --- /dev/null +++ b/library/linode_stackscript_v4.py @@ -0,0 +1,113 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible.module_utils.linode import get_user_agent + +LINODE_IMP_ERR = None +try: + from linode_api4 import StackScript, LinodeClient + HAS_LINODE_DEPENDENCY = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE_DEPENDENCY = False + + +def create_stackscript(module, client, **kwargs): + """Creates a stackscript and handles return format.""" + try: + response = 
client.linode.stackscript_create(**kwargs) + return response._raw_json + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + +def stackscript_available(module, client): + """Try to retrieve a stackscript.""" + try: + label = module.params['label'] + desc = module.params['description'] + + result = client.linode.stackscripts(StackScript.label == label, + StackScript.description == desc, + mine_only=True + ) + return result[0] + except IndexError: + return None + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + +def initialise_module(): + """Initialise the module parameter specification.""" + return AnsibleModule( + argument_spec=dict( + label=dict(type='str', required=True), + state=dict( + type='str', + required=True, + choices=['present', 'absent'] + ), + access_token=dict( + type='str', + required=True, + no_log=True, + fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), + ), + script=dict(type='str', required=True), + images=dict(type='list', required=True), + description=dict(type='str', required=False), + public=dict(type='bool', required=False, default=False), + ), + supports_check_mode=False + ) + + +def build_client(module): + """Build a LinodeClient.""" + return LinodeClient( + module.params['access_token'], + user_agent=get_user_agent('linode_v4_module') + ) + + +def main(): + """Module entrypoint.""" + module = initialise_module() + + if not HAS_LINODE_DEPENDENCY: + module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) + + client = build_client(module) + stackscript = stackscript_available(module, client) + + if module.params['state'] == 'present' and stackscript is not None: + module.exit_json(changed=False, stackscript=stackscript._raw_json) + + elif module.params['state'] == 'present' and stackscript is None: + stackscript_json = create_stackscript( + module, client, + label=module.params['label'], + 
script=module.params['script'], + images=module.params['images'], + desc=module.params['description'], + public=module.params['public'], + ) + module.exit_json(changed=True, stackscript=stackscript_json) + + elif module.params['state'] == 'absent' and stackscript is not None: + stackscript.delete() + module.exit_json(changed=True, stackscript=stackscript._raw_json) + + elif module.params['state'] == 'absent' and stackscript is None: + module.exit_json(changed=False, stackscript={}) + + +if __name__ == "__main__": + main() diff --git a/library/linode_v4.py b/library/linode_v4.py new file mode 100644 index 0000000..450db0c --- /dev/null +++ b/library/linode_v4.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ +# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback + +from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib +from ansible.module_utils.linode import get_user_agent + +LINODE_IMP_ERR = None +try: + from linode_api4 import Instance, LinodeClient + HAS_LINODE_DEPENDENCY = True +except ImportError: + LINODE_IMP_ERR = traceback.format_exc() + HAS_LINODE_DEPENDENCY = False + + +def create_linode(module, client, **kwargs): + """Creates a Linode instance and handles return format.""" + if kwargs['root_pass'] is None: + kwargs.pop('root_pass') + + try: + response = client.linode.instance_create(**kwargs) + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + try: + if isinstance(response, tuple): + instance, root_pass = response + instance_json = instance._raw_json + instance_json.update({'root_pass': root_pass}) + return instance_json + else: + return response._raw_json + except TypeError: + module.fail_json(msg='Unable to parse Linode instance creation' + ' response. 
Please raise a bug against this' + ' module on https://github.com/ansible/ansible/issues' + ) + + +def maybe_instance_from_label(module, client): + """Try to retrieve an instance based on a label.""" + try: + label = module.params['label'] + result = client.linode.instances(Instance.label == label) + return result[0] + except IndexError: + return None + except Exception as exception: + module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception) + + +def initialise_module(): + """Initialise the module parameter specification.""" + return AnsibleModule( + argument_spec=dict( + label=dict(type='str', required=True), + state=dict( + type='str', + required=True, + choices=['present', 'absent'] + ), + access_token=dict( + type='str', + required=True, + no_log=True, + fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), + ), + authorized_keys=dict(type='list', required=False), + group=dict(type='str', required=False), + image=dict(type='str', required=False), + region=dict(type='str', required=False), + root_pass=dict(type='str', required=False, no_log=True), + tags=dict(type='list', required=False), + type=dict(type='str', required=False), + stackscript_id=dict(type='int', required=False), + ), + supports_check_mode=False, + required_one_of=( + ['state', 'label'], + ), + required_together=( + ['region', 'image', 'type'], + ) + ) + + +def build_client(module): + """Build a LinodeClient.""" + return LinodeClient( + module.params['access_token'], + user_agent=get_user_agent('linode_v4_module') + ) + + +def main(): + """Module entrypoint.""" + module = initialise_module() + + if not HAS_LINODE_DEPENDENCY: + module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR) + + client = build_client(module) + instance = maybe_instance_from_label(module, client) + + if module.params['state'] == 'present' and instance is not None: + module.exit_json(changed=False, instance=instance._raw_json) + + elif module.params['state'] == 'present' and 
instance is None: + instance_json = create_linode( + module, client, + authorized_keys=module.params['authorized_keys'], + group=module.params['group'], + image=module.params['image'], + label=module.params['label'], + region=module.params['region'], + root_pass=module.params['root_pass'], + tags=module.params['tags'], + ltype=module.params['type'], + stackscript_id=module.params['stackscript_id'], + ) + module.exit_json(changed=True, instance=instance_json) + + elif module.params['state'] == 'absent' and instance is not None: + instance.delete() + module.exit_json(changed=True, instance=instance._raw_json) + + elif module.params['state'] == 'absent' and instance is None: + module.exit_json(changed=False, instance={}) + + +if __name__ == "__main__": + main() diff --git a/main.yml b/main.yml index 6f0e9ca..1b68d4c 100644 --- a/main.yml +++ b/main.yml @@ -9,7 +9,7 @@ - name: Ensure Ansible is not being run in a world writable directory assert: - that: _playbook_dir.stat.mode|int <= 0775 + that: _playbook_dir.stat.mode|int <= 775 msg: > Ansible is being run in a world writable directory ({{ playbook_dir }}), ignoring it as an ansible.cfg source. For more information see https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir @@ -23,27 +23,30 @@ - name: Set required ansible version as a fact set_fact: - required_ansible_version: - "{{ item | regex_replace('^ansible[\\s+]?(?P[=,>,<]+)[\\s+]?(?P\\d.\\d(.\\d+)?)$', - '{\"op\": \"\\g\",\"ver\": \"\\g\" }') }}" + required_ansible_version: "{{ item | regex_replace('^ansible[\\s+]?(?P[=,>,<]+)[\\s+]?(?P\\d.\\d+(.\\d+)?)$', '{\"op\": \"\\g\",\"ver\"\ + : \"\\g\" }') }}" when: '"ansible" in item' with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}" + - name: Just get the list from default pip + community.general.pip_package_info: + register: pip_package_info + - name: Verify Python meets Algo VPN requirements assert: - that: (ansible_python.version.major|string + '.' 
+ ansible_python.version.minor|string)|float is version('3.6', '>=') + that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.8', '>=') msg: > Python version is not supported. - You must upgrade to at least Python 3.6 to use this version of Algo. + You must upgrade to at least Python 3.8 to use this version of Algo. See for more details - https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported - name: Verify Ansible meets Algo VPN requirements assert: that: - - ansible_version.full is version(required_ansible_version.ver, required_ansible_version.op) + - pip_package_info.packages.pip.ansible.0.version is version(required_ansible_version.ver, required_ansible_version.op) - not ipaddr.failed msg: > - Ansible version is {{ ansible_version.full }}. + Ansible version is {{ pip_package_info.packages.pip.ansible.0.version }}. You must update the requirements to use this version of Algo. Try to run python3 -m pip install -U -r requirements.txt diff --git a/playbooks/cloud-post.yml b/playbooks/cloud-post.yml index 3ae2387..36b7d94 100644 --- a/playbooks/cloud-post.yml +++ b/playbooks/cloud-post.yml @@ -10,7 +10,7 @@ ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}" ansible_ssh_user: "{{ ansible_ssh_user|default('root') }}" ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}" - ansible_python_interpreter: "/usr/bin/python3" + ansible_python_interpreter: /usr/bin/python3 algo_provider: "{{ algo_provider }}" algo_server_name: "{{ algo_server_name }}" algo_ondemand_cellular: "{{ algo_ondemand_cellular }}" @@ -33,7 +33,7 @@ wait_for: port: "{{ ansible_ssh_port|default(22) }}" host: "{{ cloud_instance_ip }}" - search_regex: "OpenSSH" + search_regex: OpenSSH delay: 10 timeout: 320 state: present @@ -44,8 +44,7 @@ when: - pki_in_tmpfs - not algo_store_pki - - ansible_system == "Darwin" or - ansible_system == "Linux" + - ansible_system == "Darwin" or 
ansible_system == "Linux" - debug: var: IP_subject_alt_name diff --git a/playbooks/cloud-pre.yml b/playbooks/cloud-pre.yml index 44259bd..d513412 100644 --- a/playbooks/cloud-pre.yml +++ b/playbooks/cloud-pre.yml @@ -1,54 +1,53 @@ --- - block: - - name: Display the invocation environment - shell: > - ./algo-showenv.sh \ - 'algo_provider "{{ algo_provider }}"' \ - {% if ipsec_enabled %} - 'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \ - 'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \ - 'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \ - {% endif %} - 'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \ - 'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \ - 'wireguard_enabled "{{ wireguard_enabled }}"' \ - 'dns_encryption "{{ dns_encryption }}"' \ - > /dev/tty - tags: debug + - name: Display the invocation environment + shell: > + ./algo-showenv.sh \ + 'algo_provider "{{ algo_provider }}"' \ + {% if ipsec_enabled %} + 'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \ + 'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \ + 'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \ + {% endif %} + 'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \ + 'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \ + 'wireguard_enabled "{{ wireguard_enabled }}"' \ + 'dns_encryption "{{ dns_encryption }}"' \ + > /dev/tty || true + tags: debug - - name: Install the requirements - pip: - state: latest - name: - - pyOpenSSL - - jinja2==2.8 - - segno - tags: - - always - - skip_ansible_lint + - name: Install the requirements + pip: + state: present + name: + - pyOpenSSL>=0.15 + - segno + tags: + - always + - skip_ansible_lint delegate_to: localhost become: false - block: - - name: Generate the SSH private key - openssl_privatekey: - path: "{{ SSH_keys.private }}" - size: 2048 - mode: "0600" - type: RSA + - name: Generate the SSH private key + openssl_privatekey: + path: "{{ SSH_keys.private }}" + size: 4096 + mode: "0600" + type: RSA - - name: 
Generate the SSH public key - openssl_publickey: - path: "{{ SSH_keys.public }}" - privatekey_path: "{{ SSH_keys.private }}" - format: OpenSSH + - name: Generate the SSH public key + openssl_publickey: + path: "{{ SSH_keys.public }}" + privatekey_path: "{{ SSH_keys.private }}" + format: OpenSSH - - name: Copy the private SSH key to /tmp - copy: - src: "{{ SSH_keys.private }}" - dest: "{{ SSH_keys.private_tmp }}" - force: true - mode: '0600' - delegate_to: localhost - become: false + - name: Copy the private SSH key to /tmp + copy: + src: "{{ SSH_keys.private }}" + dest: "{{ SSH_keys.private_tmp }}" + force: true + mode: "0600" + delegate_to: localhost + become: false when: algo_provider != "local" diff --git a/playbooks/tmpfs/linux.yml b/playbooks/tmpfs/linux.yml index 64a9651..d36ef7b 100644 --- a/playbooks/tmpfs/linux.yml +++ b/playbooks/tmpfs/linux.yml @@ -1,5 +1,5 @@ --- - name: Linux | set OS specific facts set_fact: - tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}" + tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }} tmpfs_volume_path: /dev/shm diff --git a/playbooks/tmpfs/macos.yml b/playbooks/tmpfs/macos.yml index 72243da..2e56037 100644 --- a/playbooks/tmpfs/macos.yml +++ b/playbooks/tmpfs/macos.yml @@ -1,7 +1,7 @@ --- - name: MacOS | set OS specific facts set_fact: - tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}" + tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }} tmpfs_volume_path: /Volumes - name: MacOS | mount a ram disk @@ -9,4 +9,4 @@ /usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" || /usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000) args: - creates: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}" + creates: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }} diff --git a/playbooks/tmpfs/main.yml b/playbooks/tmpfs/main.yml index 32a01b7..628130e 100644 --- a/playbooks/tmpfs/main.yml +++ b/playbooks/tmpfs/main.yml @@ -9,7 +9,7 @@ - name: Set config paths 
as facts set_fact: - ipsec_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/" + ipsec_pki_path: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/ - name: Update config paths add_host: diff --git a/playbooks/tmpfs/umount.yml b/playbooks/tmpfs/umount.yml index 681278e..6c002cc 100644 --- a/playbooks/tmpfs/umount.yml +++ b/playbooks/tmpfs/umount.yml @@ -1,26 +1,26 @@ --- - name: Linux | Delete the PKI directory file: - path: "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" + path: /{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/ state: absent when: facts.ansible_system == "Linux" - block: - - name: MacOS | check fs the ramdisk exists - command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}" - ignore_errors: true - changed_when: false - register: diskutil_info + - name: MacOS | check fs the ramdisk exists + command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}" + ignore_errors: true + changed_when: false + register: diskutil_info - - name: MacOS | unmount and eject the ram disk - shell: > - /usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" && - /usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}" - changed_when: false - when: diskutil_info.rc == 0 - register: result - until: result.rc == 0 - retries: 5 - delay: 3 + - name: MacOS | unmount and eject the ram disk + shell: > + /usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" && + /usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}" + changed_when: false + when: diskutil_info.rc == 0 + register: result + until: result.rc == 0 + retries: 5 + delay: 3 when: - facts.ansible_system == "Darwin" diff --git a/requirements.txt b/requirements.txt index 5026077..fc877fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -ansible==2.9.22 -netaddr - +ansible==9.1.0 +jinja2~=3.0.3 +netaddr \ No newline at end of file diff --git 
a/roles/client/handlers/main.yml b/roles/client/handlers/main.yml index 33b013f..8fe8f5a 100644 --- a/roles/client/handlers/main.yml +++ b/roles/client/handlers/main.yml @@ -1,3 +1,3 @@ --- - name: restart strongswan - service: name=strongswan state=restarted + service: name={{ strongswan_service }} state=restarted diff --git a/roles/client/tasks/main.yml b/roles/client/tasks/main.yml index bc7a221..098da97 100644 --- a/roles/client/tasks/main.yml +++ b/roles/client/tasks/main.yml @@ -1,6 +1,6 @@ +--- - name: Gather Facts setup: - - name: Include system based facts and tasks import_tasks: systems/main.yml @@ -22,9 +22,9 @@ - name: Setup the ipsec config template: - src: "roles/strongswan/templates/client_ipsec.conf.j2" + src: roles/strongswan/templates/client_ipsec.conf.j2 dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf" - mode: '0644' + mode: "0644" with_items: - "{{ vpn_user }}" notify: @@ -32,9 +32,9 @@ - name: Setup the ipsec secrets template: - src: "roles/strongswan/templates/client_ipsec.secrets.j2" + src: roles/strongswan/templates/client_ipsec.secrets.j2 dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets" - mode: '0600' + mode: "0600" with_items: - "{{ vpn_user }}" notify: @@ -44,12 +44,12 @@ lineinfile: dest: "{{ item.dest }}" line: "{{ item.line }}" - create: yes + create: true with_items: - dest: "{{ configs_prefix }}/ipsec.conf" - line: "include ipsec.{{ IP_subject_alt_name }}.conf" + line: include ipsec.{{ IP_subject_alt_name }}.conf - dest: "{{ configs_prefix }}/ipsec.secrets" - line: "include ipsec.{{ IP_subject_alt_name }}.secrets" + line: include ipsec.{{ IP_subject_alt_name }}.secrets notify: - restart strongswan @@ -66,11 +66,11 @@ src: "{{ item.src }}" dest: "{{ item.dest }}" with_items: - - src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt" + - src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt" - - 
src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem" + - src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem" - - src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key" + - src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key" notify: - restart strongswan diff --git a/roles/client/tasks/systems/main.yml b/roles/client/tasks/systems/main.yml index ba24c93..62bf573 100644 --- a/roles/client/tasks/systems/main.yml +++ b/roles/client/tasks/systems/main.yml @@ -1,5 +1,4 @@ --- - - include_tasks: Debian.yml when: ansible_distribution == 'Debian' diff --git a/roles/cloud-azure/defaults/main.yml b/roles/cloud-azure/defaults/main.yml index ff3fa00..bfcd8b9 100644 --- a/roles/cloud-azure/defaults/main.yml +++ b/roles/cloud-azure/defaults/main.yml @@ -1,242 +1,210 @@ --- -_azure_regions: > - [ - { - "displayName": "East Asia", - "latitude": "22.267", - "longitude": "114.188", - "name": "eastasia", - "subscriptionId": null - }, - { - "displayName": "Southeast Asia", - "latitude": "1.283", - "longitude": "103.833", - "name": "southeastasia", - "subscriptionId": null - }, - { - "displayName": "Central US", - "latitude": "41.5908", - "longitude": "-93.6208", - "name": "centralus", - "subscriptionId": null - }, - { - "displayName": "East US", - "latitude": "37.3719", - "longitude": "-79.8164", - "name": "eastus", - "subscriptionId": null - }, - { - "displayName": "East US 2", - "latitude": "36.6681", - "longitude": "-78.3889", - "name": "eastus2", - "subscriptionId": null - }, - { - "displayName": "West US", - "latitude": "37.783", - "longitude": "-122.417", - "name": "westus", - "subscriptionId": null - }, - { - "displayName": "North Central US", - "latitude": "41.8819", - "longitude": "-87.6278", - "name": "northcentralus", - "subscriptionId": null - }, - { - "displayName": 
"South Central US", - "latitude": "29.4167", - "longitude": "-98.5", - "name": "southcentralus", - "subscriptionId": null - }, - { - "displayName": "North Europe", - "latitude": "53.3478", - "longitude": "-6.2597", - "name": "northeurope", - "subscriptionId": null - }, - { - "displayName": "West Europe", - "latitude": "52.3667", - "longitude": "4.9", - "name": "westeurope", - "subscriptionId": null - }, - { - "displayName": "Japan West", - "latitude": "34.6939", - "longitude": "135.5022", - "name": "japanwest", - "subscriptionId": null - }, - { - "displayName": "Japan East", - "latitude": "35.68", - "longitude": "139.77", - "name": "japaneast", - "subscriptionId": null - }, - { - "displayName": "Brazil South", - "latitude": "-23.55", - "longitude": "-46.633", - "name": "brazilsouth", - "subscriptionId": null - }, - { - "displayName": "Australia East", - "latitude": "-33.86", - "longitude": "151.2094", - "name": "australiaeast", - "subscriptionId": null - }, - { - "displayName": "Australia Southeast", - "latitude": "-37.8136", - "longitude": "144.9631", - "name": "australiasoutheast", - "subscriptionId": null - }, - { - "displayName": "South India", - "latitude": "12.9822", - "longitude": "80.1636", - "name": "southindia", - "subscriptionId": null - }, - { - "displayName": "Central India", - "latitude": "18.5822", - "longitude": "73.9197", - "name": "centralindia", - "subscriptionId": null - }, - { - "displayName": "West India", - "latitude": "19.088", - "longitude": "72.868", - "name": "westindia", - "subscriptionId": null - }, - { - "displayName": "Canada Central", - "latitude": "43.653", - "longitude": "-79.383", - "name": "canadacentral", - "subscriptionId": null - }, - { - "displayName": "Canada East", - "latitude": "46.817", - "longitude": "-71.217", - "name": "canadaeast", - "subscriptionId": null - }, - { - "displayName": "UK South", - "latitude": "50.941", - "longitude": "-0.799", - "name": "uksouth", - "subscriptionId": null - }, - { - "displayName": "UK 
West", - "latitude": "53.427", - "longitude": "-3.084", - "name": "ukwest", - "subscriptionId": null - }, - { - "displayName": "West Central US", - "latitude": "40.890", - "longitude": "-110.234", - "name": "westcentralus", - "subscriptionId": null - }, - { - "displayName": "West US 2", - "latitude": "47.233", - "longitude": "-119.852", - "name": "westus2", - "subscriptionId": null - }, - { - "displayName": "Korea Central", - "latitude": "37.5665", - "longitude": "126.9780", - "name": "koreacentral", - "subscriptionId": null - }, - { - "displayName": "Korea South", - "latitude": "35.1796", - "longitude": "129.0756", - "name": "koreasouth", - "subscriptionId": null - }, - { - "displayName": "France Central", - "latitude": "46.3772", - "longitude": "2.3730", - "name": "francecentral", - "subscriptionId": null - }, - { - "displayName": "France South", - "latitude": "43.8345", - "longitude": "2.1972", - "name": "francesouth", - "subscriptionId": null - }, - { - "displayName": "Australia Central", - "latitude": "-35.3075", - "longitude": "149.1244", - "name": "australiacentral", - "subscriptionId": null - }, - { - "displayName": "Australia Central 2", - "latitude": "-35.3075", - "longitude": "149.1244", - "name": "australiacentral2", - "subscriptionId": null - }, - { - "displayName": "UAE Central", - "latitude": "‎24.466667", - "longitude": "‎54.366669", - "name": "uaecentral", - "subscriptionId": null - }, - { - "displayName": "UAE North", - "latitude": "25.266666", - "longitude": "55.316666", - "name": "uaenorth", - "subscriptionId": null - }, - { - "displayName": "South Africa North", - "latitude": "-25.731340", - "longitude": "28.218370", - "name": "southafricanorth", - "subscriptionId": null - }, - { - "displayName": "South Africa West", - "latitude": "-34.075691", - "longitude": "18.843266", - "name": "southafricawest", - "subscriptionId": null - } - ] +# az account list-locations --query 
'sort_by([].{name:name,displayName:displayName,regionalDisplayName:regionalDisplayName}, &name)' -o yaml +azure_regions: + - displayName: Asia + name: asia + regionalDisplayName: Asia + - displayName: Asia Pacific + name: asiapacific + regionalDisplayName: Asia Pacific + - displayName: Australia + name: australia + regionalDisplayName: Australia + - displayName: Australia Central + name: australiacentral + regionalDisplayName: (Asia Pacific) Australia Central + - displayName: Australia Central 2 + name: australiacentral2 + regionalDisplayName: (Asia Pacific) Australia Central 2 + - displayName: Australia East + name: australiaeast + regionalDisplayName: (Asia Pacific) Australia East + - displayName: Australia Southeast + name: australiasoutheast + regionalDisplayName: (Asia Pacific) Australia Southeast + - displayName: Brazil + name: brazil + regionalDisplayName: Brazil + - displayName: Brazil South + name: brazilsouth + regionalDisplayName: (South America) Brazil South + - displayName: Brazil Southeast + name: brazilsoutheast + regionalDisplayName: (South America) Brazil Southeast + - displayName: Canada + name: canada + regionalDisplayName: Canada + - displayName: Canada Central + name: canadacentral + regionalDisplayName: (Canada) Canada Central + - displayName: Canada East + name: canadaeast + regionalDisplayName: (Canada) Canada East + - displayName: Central India + name: centralindia + regionalDisplayName: (Asia Pacific) Central India + - displayName: Central US + name: centralus + regionalDisplayName: (US) Central US + - displayName: Central US EUAP + name: centraluseuap + regionalDisplayName: (US) Central US EUAP + - displayName: Central US (Stage) + name: centralusstage + regionalDisplayName: (US) Central US (Stage) + - displayName: East Asia + name: eastasia + regionalDisplayName: (Asia Pacific) East Asia + - displayName: East Asia (Stage) + name: eastasiastage + regionalDisplayName: (Asia Pacific) East Asia (Stage) + - displayName: East US + name: eastus 
+ regionalDisplayName: (US) East US + - displayName: East US 2 + name: eastus2 + regionalDisplayName: (US) East US 2 + - displayName: East US 2 EUAP + name: eastus2euap + regionalDisplayName: (US) East US 2 EUAP + - displayName: East US 2 (Stage) + name: eastus2stage + regionalDisplayName: (US) East US 2 (Stage) + - displayName: East US (Stage) + name: eastusstage + regionalDisplayName: (US) East US (Stage) + - displayName: Europe + name: europe + regionalDisplayName: Europe + - displayName: France Central + name: francecentral + regionalDisplayName: (Europe) France Central + - displayName: France South + name: francesouth + regionalDisplayName: (Europe) France South + - displayName: Germany North + name: germanynorth + regionalDisplayName: (Europe) Germany North + - displayName: Germany West Central + name: germanywestcentral + regionalDisplayName: (Europe) Germany West Central + - displayName: Global + name: global + regionalDisplayName: Global + - displayName: India + name: india + regionalDisplayName: India + - displayName: Japan + name: japan + regionalDisplayName: Japan + - displayName: Japan East + name: japaneast + regionalDisplayName: (Asia Pacific) Japan East + - displayName: Japan West + name: japanwest + regionalDisplayName: (Asia Pacific) Japan West + - displayName: Jio India Central + name: jioindiacentral + regionalDisplayName: (Asia Pacific) Jio India Central + - displayName: Jio India West + name: jioindiawest + regionalDisplayName: (Asia Pacific) Jio India West + - displayName: Korea Central + name: koreacentral + regionalDisplayName: (Asia Pacific) Korea Central + - displayName: Korea South + name: koreasouth + regionalDisplayName: (Asia Pacific) Korea South + - displayName: North Central US + name: northcentralus + regionalDisplayName: (US) North Central US + - displayName: North Central US (Stage) + name: northcentralusstage + regionalDisplayName: (US) North Central US (Stage) + - displayName: North Europe + name: northeurope + 
regionalDisplayName: (Europe) North Europe + - displayName: Norway East + name: norwayeast + regionalDisplayName: (Europe) Norway East + - displayName: Norway West + name: norwaywest + regionalDisplayName: (Europe) Norway West + - displayName: Qatar Central + name: qatarcentral + regionalDisplayName: (Europe) Qatar Central + - displayName: South Africa North + name: southafricanorth + regionalDisplayName: (Africa) South Africa North + - displayName: South Africa West + name: southafricawest + regionalDisplayName: (Africa) South Africa West + - displayName: South Central US + name: southcentralus + regionalDisplayName: (US) South Central US + - displayName: South Central US (Stage) + name: southcentralusstage + regionalDisplayName: (US) South Central US (Stage) + - displayName: Southeast Asia + name: southeastasia + regionalDisplayName: (Asia Pacific) Southeast Asia + - displayName: Southeast Asia (Stage) + name: southeastasiastage + regionalDisplayName: (Asia Pacific) Southeast Asia (Stage) + - displayName: South India + name: southindia + regionalDisplayName: (Asia Pacific) South India + - displayName: Sweden Central + name: swedencentral + regionalDisplayName: (Europe) Sweden Central + - displayName: Sweden South + name: swedensouth + regionalDisplayName: (Europe) Sweden South + - displayName: Switzerland North + name: switzerlandnorth + regionalDisplayName: (Europe) Switzerland North + - displayName: Switzerland West + name: switzerlandwest + regionalDisplayName: (Europe) Switzerland West + - displayName: UAE Central + name: uaecentral + regionalDisplayName: (Middle East) UAE Central + - displayName: UAE North + name: uaenorth + regionalDisplayName: (Middle East) UAE North + - displayName: United Kingdom + name: uk + regionalDisplayName: United Kingdom + - displayName: UK South + name: uksouth + regionalDisplayName: (Europe) UK South + - displayName: UK West + name: ukwest + regionalDisplayName: (Europe) UK West + - displayName: United States + name: 
unitedstates + regionalDisplayName: United States + - displayName: West Central US + name: westcentralus + regionalDisplayName: (US) West Central US + - displayName: West Europe + name: westeurope + regionalDisplayName: (Europe) West Europe + - displayName: West India + name: westindia + regionalDisplayName: (Asia Pacific) West India + - displayName: West US + name: westus + regionalDisplayName: (US) West US + - displayName: West US 2 + name: westus2 + regionalDisplayName: (US) West US 2 + - displayName: West US 2 (Stage) + name: westus2stage + regionalDisplayName: (US) West US 2 (Stage) + - displayName: West US 3 + name: westus3 + regionalDisplayName: (US) West US 3 + - displayName: West US (Stage) + name: westusstage + regionalDisplayName: (US) West US (Stage) diff --git a/roles/cloud-azure/files/deployment.json b/roles/cloud-azure/files/deployment.json index bfc1d6f..f488906 100644 --- a/roles/cloud-azure/files/deployment.json +++ b/roles/cloud-azure/files/deployment.json @@ -23,6 +23,9 @@ "imageReferenceVersion": { "type": "string" }, + "osDiskType": { + "type": "string" + }, "SshPort": { "type": "int" }, @@ -197,7 +200,10 @@ "version": "[parameters('imageReferenceVersion')]" }, "osDisk": { - "createOption": "FromImage" + "createOption": "FromImage", + "managedDisk": { + "storageAccountType": "[parameters('osDiskType')]" + } } }, "networkProfile": { diff --git a/roles/cloud-azure/tasks/main.yml b/roles/cloud-azure/tasks/main.yml index 2761544..dafbe0d 100644 --- a/roles/cloud-azure/tasks/main.yml +++ b/roles/cloud-azure/tasks/main.yml @@ -37,6 +37,8 @@ value: "{{ cloud_providers.azure.image.sku }}" imageReferenceVersion: value: "{{ cloud_providers.azure.image.version }}" + osDiskType: + value: "{{ cloud_providers.azure.osDisk.type }}" SshPort: value: "{{ ssh_port }}" UserData: diff --git a/roles/cloud-azure/tasks/prompts.yml b/roles/cloud-azure/tasks/prompts.yml index 0971720..99fd03b 100644 --- a/roles/cloud-azure/tasks/prompts.yml +++ 
b/roles/cloud-azure/tasks/prompts.yml @@ -6,25 +6,21 @@ subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}" - block: - - name: Set facts about the regions - set_fact: - azure_regions: "{{ _azure_regions|from_json | sort(attribute='name') }}" - - - name: Set the default region - set_fact: - default_region: >- - {% for r in azure_regions %} - {%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %} - {%- endfor %} - - - pause: - prompt: | - What region should the server be located in? + - name: Set the default region + set_fact: + default_region: >- {% for r in azure_regions %} - {{ loop.index }}. {{ r['displayName'] }} - {% endfor %} + {%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %} + {%- endfor %} + + - pause: + prompt: | + What region should the server be located in? + {% for r in azure_regions %} + {{ loop.index }}. {{ r['regionalDisplayName'] }} + {% endfor %} - Enter the number of your desired region - [{{ default_region }}] - register: _algo_region + Enter the number of your desired region + [{{ default_region }}] + register: _algo_region when: region is undefined diff --git a/roles/cloud-azure/tasks/venv.yml b/roles/cloud-azure/tasks/venv.yml index 4d32678..5b8d52b 100644 --- a/roles/cloud-azure/tasks/venv.yml +++ b/roles/cloud-azure/tasks/venv.yml @@ -1,42 +1,6 @@ --- - name: Install requirements pip: - name: - - packaging - - requests[security] - - azure-cli-core==2.0.35 - - azure-cli-nspkg==3.0.2 - - azure-common==1.1.11 - - azure-mgmt-authorization==0.51.1 - - azure-mgmt-batch==5.0.1 - - azure-mgmt-cdn==3.0.0 - - azure-mgmt-compute==4.4.0 - - azure-mgmt-containerinstance==1.4.0 - - azure-mgmt-containerregistry==2.0.0 - - azure-mgmt-containerservice==4.4.0 - - azure-mgmt-dns==2.1.0 - - azure-mgmt-keyvault==1.1.0 - - azure-mgmt-marketplaceordering==0.1.0 - - azure-mgmt-monitor==0.5.2 - - azure-mgmt-network==2.3.0 - - azure-mgmt-nspkg==2.0.0 - - azure-mgmt-redis==5.0.0 - - 
azure-mgmt-resource==2.1.0 - - azure-mgmt-rdbms==1.4.1 - - azure-mgmt-servicebus==0.5.3 - - azure-mgmt-sql==0.10.0 - - azure-mgmt-storage==3.1.0 - - azure-mgmt-trafficmanager==0.50.0 - - azure-mgmt-web==0.41.0 - - azure-nspkg==2.0.0 - - azure-storage==0.35.1 - - msrest==0.6.1 - - msrestazure==0.5.0 - - azure-keyvault==1.0.0a1 - - azure-graphrbac==0.40.0 - - azure-mgmt-cosmosdb==0.5.2 - - azure-mgmt-hdinsight==0.1.0 - - azure-mgmt-devtestlabs==3.0.0 - - azure-mgmt-loganalytics==0.2.0 + requirements: https://raw.githubusercontent.com/ansible-collections/azure/v1.13.0/requirements-azure.txt state: latest virtualenv_python: python3 diff --git a/roles/cloud-cloudstack/tasks/main.yml b/roles/cloud-cloudstack/tasks/main.yml index a3a7781..ea05938 100644 --- a/roles/cloud-cloudstack/tasks/main.yml +++ b/roles/cloud-cloudstack/tasks/main.yml @@ -26,7 +26,7 @@ end_port: "{{ item.end_port }}" cidr: "{{ item.range }}" with_items: - - { proto: tcp, start_port: '{{ ssh_port }}', end_port: '{{ ssh_port }}', range: 0.0.0.0/0 } + - { proto: tcp, start_port: "{{ ssh_port }}", end_port: "{{ ssh_port }}", range: 0.0.0.0/0 } - { proto: udp, start_port: 4500, end_port: 4500, range: 0.0.0.0/0 } - { proto: udp, start_port: 500, end_port: 500, range: 0.0.0.0/0 } - { proto: udp, start_port: "{{ wireguard_port }}", end_port: "{{ wireguard_port }}", range: 0.0.0.0/0 } @@ -54,5 +54,6 @@ ansible_ssh_port: "{{ ssh_port }}" cloudinit: true environment: - CLOUDSTACK_CONFIG: "{{ algo_cs_config }}" - CLOUDSTACK_REGION: "{{ algo_cs_region }}" + CLOUDSTACK_KEY: "{{ algo_cs_key }}" + CLOUDSTACK_SECRET: "{{ algo_cs_token }}" + CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}" diff --git a/roles/cloud-cloudstack/tasks/prompts.yml b/roles/cloud-cloudstack/tasks/prompts.yml index dc80dcf..df39a35 100644 --- a/roles/cloud-cloudstack/tasks/prompts.yml +++ b/roles/cloud-cloudstack/tasks/prompts.yml @@ -1,54 +1,65 @@ --- - block: - - pause: - prompt: | - Enter path for cloudstack.ini file 
(https://trailofbits.github.io/algo/cloud-cloudstack.html) - [~/.cloudstack.ini] - register: _cs_config - when: - - cs_config is undefined - - lookup('env', 'CLOUDSTACK_CONFIG') | length <= 0 - - - pause: - prompt: | - Specify region to use in cloudstack.ini file - [exoscale] - register: _cs_region - when: - - cs_region is undefined - - lookup('env', 'CLOUDSTACK_REGION') | length <= 0 - - - set_fact: - algo_cs_config: "{{ cs_config | default(_cs_config.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_CONFIG'), true) | default('~/.cloudstack.ini', true) }}" - algo_cs_region: "{{ cs_region | default(_cs_region.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_REGION'), true) | default('exoscale', true) }}" - - - name: Get zones on cloud - cloudstack_zones: - register: _cs_zones - environment: - CLOUDSTACK_CONFIG: "{{ algo_cs_config }}" - CLOUDSTACK_REGION: "{{ algo_cs_region }}" - - - name: Extract zones from output - set_fact: - cs_zones: "{{ _cs_zones['zone'] | sort(attribute='name') }}" - - - name: Set the default zone - set_fact: - default_zone: >- - {% for z in cs_zones %} - {%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %} - {%- endfor %} - - - pause: - prompt: | - What zone should the server be located in? 
+ - pause: + prompt: | + Enter the API key (https://trailofbits.github.io/algo/cloud-cloudstack.html): + echo: false + register: _cs_key + when: + - cs_key is undefined + - lookup('env','CLOUDSTACK_KEY')|length <= 0 + + - pause: + prompt: | + Enter the API ssecret (https://trailofbits.github.io/algo/cloud-cloudstack.html): + echo: false + register: _cs_secret + when: + - cs_secret is undefined + - lookup('env','CLOUDSTACK_SECRET')|length <= 0 + + - pause: + prompt: | + Enter the API endpoint (https://trailofbits.github.io/algo/cloud-cloudstack.html) + [https://api.exoscale.com/compute] + register: _cs_url + when: + - cs_url is undefined + - lookup('env', 'CLOUDSTACK_ENDPOINT') | length <= 0 + + - set_fact: + algo_cs_key: "{{ cs_key | default(_cs_key.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_KEY'), true) }}" + algo_cs_token: "{{ cs_secret | default(_cs_secret.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_SECRET'), true) }}" + algo_cs_url: "{{ cs_url | default(_cs_url.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) | default('https://api.exoscale.com/compute',\ + \ true) }}" + + - name: Get zones on cloud + cs_zone_info: + register: _cs_zones + environment: + CLOUDSTACK_KEY: "{{ algo_cs_key }}" + CLOUDSTACK_SECRET: "{{ algo_cs_token }}" + CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}" + + - name: Extract zones from output + set_fact: + cs_zones: "{{ _cs_zones['zones'] | sort(attribute='name') }}" + + - name: Set the default zone + set_fact: + default_zone: >- {% for z in cs_zones %} - {{ loop.index }}. {{ z['name'] }} - {% endfor %} + {%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %} + {%- endfor %} - Enter the number of your desired zone - [{{ default_zone }}] - register: _algo_region - when: region is undefined + - pause: + prompt: | + What zone should the server be located in? + {% for z in cs_zones %} + {{ loop.index }}. 
{{ z['name'] }} + {% endfor %} + Enter the number of your desired zone + [{{ default_zone }}] + register: _algo_region + when: region is undefined diff --git a/roles/cloud-digitalocean/tasks/main.yml b/roles/cloud-digitalocean/tasks/main.yml index 2013a22..952b4fc 100644 --- a/roles/cloud-digitalocean/tasks/main.yml +++ b/roles/cloud-digitalocean/tasks/main.yml @@ -2,14 +2,14 @@ - name: Include prompts import_tasks: prompts.yml -- name: "Upload the SSH key" +- name: Upload the SSH key digital_ocean_sshkey: oauth_token: "{{ algo_do_token }}" name: "{{ SSH_keys.comment }}" ssh_pub_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}" register: do_ssh_key -- name: "Creating a droplet..." +- name: Creating a droplet... digital_ocean_droplet: state: present name: "{{ algo_server_name }}" @@ -26,21 +26,25 @@ - Environment:Algo register: digital_ocean_droplet +# Return data is not idempotent +- set_fact: + droplet: "{{ digital_ocean_droplet.data.droplet | default(digital_ocean_droplet.data) }}" + - block: - - name: "Create a Floating IP" - digital_ocean_floating_ip: - state: present - oauth_token: "{{ algo_do_token }}" - droplet_id: "{{ digital_ocean_droplet.data.droplet.id }}" - register: digital_ocean_floating_ip + - name: Create a Floating IP + digital_ocean_floating_ip: + state: present + oauth_token: "{{ algo_do_token }}" + droplet_id: "{{ droplet.id }}" + register: digital_ocean_floating_ip - - name: Set the static ip as a fact - set_fact: - cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}" + - name: Set the static ip as a fact + set_fact: + cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}" when: alternative_ingress_ip - set_fact: - cloud_instance_ip: "{{ digital_ocean_droplet.data.ip_address }}" + cloud_instance_ip: "{{ (droplet.networks.v4 | selectattr('type', '==', 'public')).0.ip_address }}" ansible_ssh_user: algo ansible_ssh_port: "{{ ssh_port }}" cloudinit: true diff --git 
a/roles/cloud-digitalocean/tasks/prompts.yml b/roles/cloud-digitalocean/tasks/prompts.yml index b288d93..7f24e3f 100644 --- a/roles/cloud-digitalocean/tasks/prompts.yml +++ b/roles/cloud-digitalocean/tasks/prompts.yml @@ -18,13 +18,13 @@ method: GET status_code: 200 headers: - Content-Type: "application/json" - Authorization: "Bearer {{ algo_do_token }}" + Content-Type: application/json + Authorization: Bearer {{ algo_do_token }} register: _do_regions - name: Set facts about the regions set_fact: - do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}" + do_regions: "{{ _do_regions.json.regions | selectattr('available', 'true') | sort(attribute='slug') }}" - name: Set default region set_fact: diff --git a/roles/cloud-ec2/files/stack.yaml b/roles/cloud-ec2/files/stack.yaml index 661d5dc..8c6cf47 100644 --- a/roles/cloud-ec2/files/stack.yaml +++ b/roles/cloud-ec2/files/stack.yaml @@ -20,9 +20,17 @@ Parameters: Type: String SshPort: Type: String + InstanceMarketTypeParameter: + Description: Launch a Spot instance or standard on-demand instance + Type: String + Default: on-demand + AllowedValues: + - spot + - on-demand Conditions: AllocateNewEIP: !Equals [!Ref UseThisElasticIP, ''] AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']] + InstanceIsSpot: !Equals [spot, !Ref InstanceMarketTypeParameter] Resources: VPC: Type: AWS::EC2::VPC @@ -146,6 +154,15 @@ Resources: - Key: Name Value: !Ref AWS::StackName + EC2LaunchTemplate: + Type: AWS::EC2::LaunchTemplate + Condition: InstanceIsSpot # Only create this template if requested + Properties: # a spot instance_market_type in config.cfg + LaunchTemplateName: !Ref AWS::StackName + LaunchTemplateData: + InstanceMarketOptions: + MarketType: spot + EC2Instance: Type: AWS::EC2::Instance DependsOn: @@ -169,6 +186,14 @@ Resources: SubnetId: !Ref Subnet Ipv6AddressCount: 1 UserData: !Ref UserData + LaunchTemplate: + !If # Only if Conditions created "EC2LaunchTemplate" + - InstanceIsSpot + - + 
LaunchTemplateId: + !Ref EC2LaunchTemplate + Version: 1 + - !Ref AWS::NoValue # Else this LaunchTemplate not set Tags: - Key: Name Value: !Ref AWS::StackName diff --git a/roles/cloud-ec2/tasks/cloudformation.yml b/roles/cloud-ec2/tasks/cloudformation.yml index 4ddc8d6..f05ab37 100644 --- a/roles/cloud-ec2/tasks/cloudformation.yml +++ b/roles/cloud-ec2/tasks/cloudformation.yml @@ -4,7 +4,7 @@ aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" stack_name: "{{ stack_name }}" - state: "present" + state: present region: "{{ algo_region }}" template: roles/cloud-ec2/files/stack.yaml template_parameters: @@ -16,6 +16,7 @@ EbsEncrypted: "{{ encrypted }}" UserData: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}" SshPort: "{{ ssh_port }}" + InstanceMarketTypeParameter: "{{ cloud_providers.ec2.instance_market_type }}" tags: Environment: Algo register: stack diff --git a/roles/cloud-ec2/tasks/main.yml b/roles/cloud-ec2/tasks/main.yml index 450b267..5f68c92 100644 --- a/roles/cloud-ec2/tasks/main.yml +++ b/roles/cloud-ec2/tasks/main.yml @@ -6,13 +6,14 @@ import_tasks: prompts.yml - name: Locate official AMI for region - ec2_ami_facts: + ec2_ami_info: aws_access_key: "{{ access_key }}" aws_secret_key: "{{ secret_key }}" owners: "{{ cloud_providers.ec2.image.owner }}" region: "{{ algo_region }}" filters: - name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*" + architecture: "{{ cloud_providers.ec2.image.arch }}" + name: ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-*64-server-* register: ami_search - name: Set the ami id as a fact diff --git a/roles/cloud-ec2/tasks/prompts.yml b/roles/cloud-ec2/tasks/prompts.yml index f003f80..368922f 100644 --- a/roles/cloud-ec2/tasks/prompts.yml +++ b/roles/cloud-ec2/tasks/prompts.yml @@ -6,8 +6,8 @@ echo: false register: _aws_access_key when: - - aws_access_key is undefined - - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 + - aws_access_key is undefined + - 
lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 - pause: prompt: | @@ -23,35 +23,35 @@ secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}" - block: - - name: Get regions - aws_region_facts: - aws_access_key: "{{ access_key }}" - aws_secret_key: "{{ secret_key }}" - region: us-east-1 - register: _aws_regions + - name: Get regions + aws_region_info: + aws_access_key: "{{ access_key }}" + aws_secret_key: "{{ secret_key }}" + region: us-east-1 + register: _aws_regions - - name: Set facts about the regions - set_fact: - aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}" + - name: Set facts about the regions + set_fact: + aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}" - - name: Set the default region - set_fact: - default_region: >- - {% for r in aws_regions %} - {%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %} - {%- endfor %} - - - pause: - prompt: | - What region should the server be located in? - (https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region) + - name: Set the default region + set_fact: + default_region: >- {% for r in aws_regions %} - {{ loop.index }}. {{ r['region_name'] }} - {% endfor %} + {%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %} + {%- endfor %} + + - pause: + prompt: | + What region should the server be located in? + (https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region) + {% for r in aws_regions %} + {{ loop.index }}. 
{{ r['region_name'] }} + {% endfor %} - Enter the number of your desired region - [{{ default_region }}] - register: _algo_region + Enter the number of your desired region + [{{ default_region }}] + register: _algo_region when: region is undefined - name: Set algo_region and stack_name facts @@ -63,26 +63,26 @@ stack_name: "{{ algo_server_name | replace('.', '-') }}" - block: - - name: Get existing available Elastic IPs - ec2_eip_facts: - aws_access_key: "{{ access_key }}" - aws_secret_key: "{{ secret_key }}" - region: "{{ algo_region }}" - register: raw_eip_addresses + - name: Get existing available Elastic IPs + ec2_eip_info: + aws_access_key: "{{ access_key }}" + aws_secret_key: "{{ secret_key }}" + region: "{{ algo_region }}" + register: raw_eip_addresses - - set_fact: - available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}" + - set_fact: + available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}" - - pause: - prompt: >- - What Elastic IP would you like to use? - {% for eip in available_eip_addresses %} - {{ loop.index }}. {{ eip['public_ip'] }} - {% endfor %} + - pause: + prompt: >- + What Elastic IP would you like to use? + {% for eip in available_eip_addresses %} + {{ loop.index }}. 
{{ eip['public_ip'] }} + {% endfor %} - Enter the number of your desired Elastic IP - register: _use_existing_eip + Enter the number of your desired Elastic IP + register: _use_existing_eip - - set_fact: - existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}" + - set_fact: + existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}" when: cloud_providers.ec2.use_existing_eip diff --git a/roles/cloud-gce/tasks/main.yml b/roles/cloud-gce/tasks/main.yml index ca68567..69aa56a 100644 --- a/roles/cloud-gce/tasks/main.yml +++ b/roles/cloud-gce/tasks/main.yml @@ -27,27 +27,27 @@ allowed: - ip_protocol: udp ports: - - '500' - - '4500' - - '{{ wireguard_port|string }}' + - "500" + - "4500" + - "{{ wireguard_port|string }}" - ip_protocol: tcp ports: - - '{{ ssh_port }}' + - "{{ ssh_port }}" - ip_protocol: icmp - block: - - name: External IP allocated - gcp_compute_address: - auth_kind: serviceaccount - service_account_file: "{{ credentials_file_path }}" - project: "{{ project_id }}" - name: "{{ algo_server_name }}" - region: "{{ algo_region }}" - register: gcp_compute_address + - name: External IP allocated + gcp_compute_address: + auth_kind: serviceaccount + service_account_file: "{{ credentials_file_path }}" + project: "{{ project_id }}" + name: "{{ algo_server_name }}" + region: "{{ algo_region }}" + register: gcp_compute_address - - name: Set External IP as a fact - set_fact: - external_ip: "{{ gcp_compute_address.address }}" + - name: Set External IP as a fact + set_fact: + external_ip: "{{ gcp_compute_address.address }}" when: cloud_providers.gce.external_static_ip - name: Instance created @@ -62,9 +62,9 @@ - auto_delete: true boot: true initialize_params: - source_image: "projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }}" + source_image: projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }} metadata: - ssh-keys: "algo:{{ 
ssh_public_key_lookup }}" + ssh-keys: algo:{{ ssh_public_key_lookup }} user-data: "{{ lookup('template', 'files/cloud-init/base.yml') }}" network_interfaces: - network: "{{ gcp_compute_network }}" @@ -74,7 +74,7 @@ type: ONE_TO_ONE_NAT tags: items: - - "environment-algo" + - environment-algo register: gcp_compute_instance - set_fact: diff --git a/roles/cloud-gce/tasks/prompts.yml b/roles/cloud-gce/tasks/prompts.yml index b8a3896..bc7eedd 100644 --- a/roles/cloud-gce/tasks/prompts.yml +++ b/roles/cloud-gce/tasks/prompts.yml @@ -9,7 +9,8 @@ - lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0 - set_fact: - credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}" + credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'),\ + \ true) }}" ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}" - set_fact: @@ -20,40 +21,40 @@ project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}" - block: - - name: Get regions - gcp_compute_location_info: - auth_kind: serviceaccount - service_account_file: "{{ credentials_file_path }}" - project: "{{ project_id }}" - scope: regions - filters: status=UP - register: gcp_compute_regions_info + - name: Get regions + gcp_compute_location_info: + auth_kind: serviceaccount + service_account_file: "{{ credentials_file_path }}" + project: "{{ project_id }}" + scope: regions + filters: status=UP + register: gcp_compute_regions_info - - name: Set facts about the regions - set_fact: - gce_regions: >- - [{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%} - '{{ region.name }}'{% if not loop.last %},{% endif %} - {%- endfor -%}] + - name: Set facts about the regions + set_fact: + gce_regions: >- + [{%- for region in gcp_compute_regions_info.resources | 
sort(attribute='name') -%} + '{{ region.name }}'{% if not loop.last %},{% endif %} + {%- endfor -%}] - - name: Set facts about the default region - set_fact: - default_region: >- - {% for region in gce_regions %} - {%- if region == "us-east1" %}{{ loop.index }}{% endif %} - {%- endfor %} + - name: Set facts about the default region + set_fact: + default_region: >- + {% for region in gce_regions %} + {%- if region == "us-east1" %}{{ loop.index }}{% endif %} + {%- endfor %} - - pause: - prompt: | - What region should the server be located in? - (https://cloud.google.com/compute/docs/regions-zones/#locations) - {% for r in gce_regions %} - {{ loop.index }}. {{ r }} - {% endfor %} + - pause: + prompt: | + What region should the server be located in? + (https://cloud.google.com/compute/docs/regions-zones/#locations) + {% for r in gce_regions %} + {{ loop.index }}. {{ r }} + {% endfor %} - Enter the number of your desired region - [{{ default_region }}] - register: _gce_region + Enter the number of your desired region + [{{ default_region }}] + register: _gce_region when: region is undefined - name: Set region as a fact @@ -70,8 +71,8 @@ project: "{{ project_id }}" scope: zones filters: - - "name={{ algo_region }}-*" - - "status=UP" + - name={{ algo_region }}-* + - status=UP register: gcp_compute_zone_info - name: Set random available zone as a fact diff --git a/roles/cloud-hetzner/tasks/main.yml b/roles/cloud-hetzner/tasks/main.yml index cbae7c6..e30ae00 100644 --- a/roles/cloud-hetzner/tasks/main.yml +++ b/roles/cloud-hetzner/tasks/main.yml @@ -6,15 +6,15 @@ import_tasks: prompts.yml - name: Create an ssh key - hcloud_ssh_key: - name: "algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}" + hetzner.hcloud.ssh_key: + name: algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }} public_key: "{{ lookup('file', SSH_keys.public) }}" state: present api_token: "{{ algo_hcloud_token }}" register: hcloud_ssh_key - name: Create a server... 
- hcloud_server: + hetzner.hcloud.server: name: "{{ algo_server_name }}" location: "{{ algo_hcloud_region }}" server_type: "{{ cloud_providers.hetzner.server_type }}" diff --git a/roles/cloud-hetzner/tasks/prompts.yml b/roles/cloud-hetzner/tasks/prompts.yml index be42abd..3663b7e 100644 --- a/roles/cloud-hetzner/tasks/prompts.yml +++ b/roles/cloud-hetzner/tasks/prompts.yml @@ -13,13 +13,13 @@ algo_hcloud_token: "{{ hcloud_token | default(_hcloud_token.user_input|default(None)) | default(lookup('env','HCLOUD_TOKEN'), true) }}" - name: Get regions - hcloud_datacenter_facts: + hetzner.hcloud.datacenter_info: api_token: "{{ algo_hcloud_token }}" register: _hcloud_regions -- name: Set facts about thre regions +- name: Set facts about the regions set_fact: - hcloud_regions: "{{ hcloud_datacenter_facts | sort(attribute='location') }}" + hcloud_regions: "{{ _hcloud_regions.hcloud_datacenter_info | sort(attribute='location') }}" - name: Set default region set_fact: diff --git a/roles/cloud-lightsail/files/stack.yaml b/roles/cloud-lightsail/files/stack.yaml new file mode 100644 index 0000000..8bb2135 --- /dev/null +++ b/roles/cloud-lightsail/files/stack.yaml @@ -0,0 +1,73 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: 'Algo VPN stack (LightSail)' +Parameters: + InstanceTypeParameter: + Type: String + Default: 'nano_2_0' + ImageIdParameter: + Type: String + Default: 'ubuntu_20_04' + WireGuardPort: + Type: String + Default: '51820' + SshPort: + Type: String + Default: '4160' + UserData: + Type: String + Default: 'true' +Resources: + Instance: + Type: AWS::Lightsail::Instance + Properties: + BlueprintId: + Ref: ImageIdParameter + BundleId: + Ref: InstanceTypeParameter + InstanceName: !Ref AWS::StackName + Networking: + Ports: + - AccessDirection: inbound + Cidrs: ['0.0.0.0/0'] + Ipv6Cidrs: ['::/0'] + CommonName: SSH + FromPort: !Ref SshPort + ToPort: !Ref SshPort + Protocol: tcp + - AccessDirection: inbound + Cidrs: ['0.0.0.0/0'] + Ipv6Cidrs: ['::/0'] + CommonName: 
WireGuard + FromPort: !Ref WireGuardPort + ToPort: !Ref WireGuardPort + Protocol: udp + - AccessDirection: inbound + Cidrs: ['0.0.0.0/0'] + Ipv6Cidrs: ['::/0'] + CommonName: IPSec-4500 + FromPort: 4500 + ToPort: 4500 + Protocol: udp + - AccessDirection: inbound + Cidrs: ['0.0.0.0/0'] + Ipv6Cidrs: ['::/0'] + CommonName: IPSec-500 + FromPort: 500 + ToPort: 500 + Protocol: udp + Tags: + - Key: Name + Value: !Ref AWS::StackName + UserData: !Ref UserData + + StaticIP: + Type: AWS::Lightsail::StaticIp + Properties: + AttachedTo: !Ref Instance + StaticIpName: !Join [ "-", [ !Ref AWS::StackName, "ip" ] ] + DependsOn: + - Instance + +Outputs: + IpAddress: + Value: !GetAtt [StaticIP, IpAddress] diff --git a/roles/cloud-lightsail/tasks/cloudformation.yml b/roles/cloud-lightsail/tasks/cloudformation.yml new file mode 100644 index 0000000..d3798a9 --- /dev/null +++ b/roles/cloud-lightsail/tasks/cloudformation.yml @@ -0,0 +1,19 @@ +--- +- name: Deploy the template + cloudformation: + aws_access_key: "{{ access_key }}" + aws_secret_key: "{{ secret_key }}" + stack_name: "{{ stack_name }}" + state: present + region: "{{ algo_region }}" + template: roles/cloud-lightsail/files/stack.yaml + template_parameters: + InstanceTypeParameter: "{{ cloud_providers.lightsail.size }}" + ImageIdParameter: "{{ cloud_providers.lightsail.image }}" + WireGuardPort: "{{ wireguard_port }}" + SshPort: "{{ ssh_port }}" + UserData: "{{ lookup('template', 'files/cloud-init/base.sh') }}" + tags: + Environment: Algo + Lightsail: true + register: stack diff --git a/roles/cloud-lightsail/tasks/main.yml b/roles/cloud-lightsail/tasks/main.yml index 0ee04b4..2d8d702 100644 --- a/roles/cloud-lightsail/tasks/main.yml +++ b/roles/cloud-lightsail/tasks/main.yml @@ -5,36 +5,11 @@ - name: Include prompts import_tasks: prompts.yml -- name: Create an instance - lightsail: - aws_access_key: "{{ access_key }}" - aws_secret_key: "{{ secret_key }}" - name: "{{ algo_server_name }}" - state: present - region: "{{ algo_region 
}}" - zone: "{{ algo_region }}a" - blueprint_id: "{{ cloud_providers.lightsail.image }}" - bundle_id: "{{ cloud_providers.lightsail.size }}" - wait_timeout: "300" - open_ports: - - from_port: "{{ ssh_port }}" - to_port: "{{ ssh_port }}" - protocol: tcp - - from_port: 4500 - to_port: 4500 - protocol: udp - - from_port: 500 - to_port: 500 - protocol: udp - - from_port: "{{ wireguard_port }}" - to_port: "{{ wireguard_port }}" - protocol: udp - user_data: | - {{ lookup('template', 'files/cloud-init/base.sh') }} - register: algo_instance +- name: Deploy the stack + import_tasks: cloudformation.yml - set_fact: - cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}" + cloud_instance_ip: "{{ stack.stack_outputs.IpAddress }}" ansible_ssh_user: algo ansible_ssh_port: "{{ ssh_port }}" cloudinit: true diff --git a/roles/cloud-lightsail/tasks/prompts.yml b/roles/cloud-lightsail/tasks/prompts.yml index b1584f4..49e6bd0 100644 --- a/roles/cloud-lightsail/tasks/prompts.yml +++ b/roles/cloud-lightsail/tasks/prompts.yml @@ -6,8 +6,8 @@ echo: false register: _aws_access_key when: - - aws_access_key is undefined - - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 + - aws_access_key is undefined + - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 - pause: prompt: | @@ -23,38 +23,39 @@ secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}" - block: - - name: Get regions - lightsail_region_facts: - aws_access_key: "{{ access_key }}" - aws_secret_key: "{{ secret_key }}" - region: us-east-1 - register: _lightsail_regions - - - name: Set facts about the regions - set_fact: - lightsail_regions: "{{ _lightsail_regions.data.regions | sort(attribute='name') }}" - - - name: Set the default region - set_fact: - default_region: >- - {% for r in lightsail_regions %} - {%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %} - {%- endfor %} - - - pause: - prompt: | - What region should the 
server be located in? - (https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/) + - name: Get regions + lightsail_region_facts: + aws_access_key: "{{ access_key }}" + aws_secret_key: "{{ secret_key }}" + region: us-east-1 + register: _lightsail_regions + + - name: Set facts about the regions + set_fact: + lightsail_regions: "{{ _lightsail_regions.data.regions | sort(attribute='name') }}" + + - name: Set the default region + set_fact: + default_region: >- {% for r in lightsail_regions %} - {{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }} - {% endfor %} - - Enter the number of your desired region - [{{ default_region }}] - register: _algo_region + {%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %} + {%- endfor %} + + - pause: + prompt: | + What region should the server be located in? + (https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/) + {% for r in lightsail_regions %} + {{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }} + {% endfor %} + + Enter the number of your desired region + [{{ default_region }}] + register: _algo_region when: region is undefined - set_fact: + stack_name: "{{ algo_server_name | replace('.', '-') }}" algo_region: >- {% if region is defined %}{{ region }} {%- elif _algo_region.user_input %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }} diff --git a/roles/cloud-linode/defaults/main.yml b/roles/cloud-linode/defaults/main.yml new file mode 100644 index 0000000..76a6249 --- /dev/null +++ b/roles/cloud-linode/defaults/main.yml @@ -0,0 +1,2 @@ +--- +linode_venv: "{{ playbook_dir }}/configs/.venvs/linode" diff --git a/roles/cloud-linode/tasks/main.yml b/roles/cloud-linode/tasks/main.yml new file mode 100644 index 0000000..8cdd47f --- /dev/null +++ b/roles/cloud-linode/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: Build python virtual environment + import_tasks: venv.yml + +- name: 
Include prompts + import_tasks: prompts.yml + +- name: Set facts + set_fact: + stackscript: | + {{ lookup('template', 'files/cloud-init/base.sh') }} + mkdir -p /var/lib/cloud/data/ || true + touch /var/lib/cloud/data/result.json + +- name: Create a stackscript + linode_stackscript_v4: + access_token: "{{ algo_linode_token }}" + label: "{{ algo_server_name }}" + state: present + description: Environment:Algo + images: + - "{{ cloud_providers.linode.image }}" + script: | + {{ stackscript }} + register: _linode_stackscript + +- name: Update the stackscript + uri: + url: https://api.linode.com/v4/linode/stackscripts/{{ _linode_stackscript.stackscript.id }} + method: PUT + body_format: json + body: + script: | + {{ stackscript }} + headers: + Content-Type: application/json + Authorization: Bearer {{ algo_linode_token }} + when: (_linode_stackscript.stackscript.script | hash('md5')) != (stackscript | hash('md5')) + +- name: Creating an instance... + linode_v4: + access_token: "{{ algo_linode_token }}" + label: "{{ algo_server_name }}" + state: present + region: "{{ algo_linode_region }}" + image: "{{ cloud_providers.linode.image }}" + type: "{{ cloud_providers.linode.type }}" + authorized_keys: "{{ public_key }}" + stackscript_id: "{{ _linode_stackscript.stackscript.id }}" + register: _linode + +- set_fact: + cloud_instance_ip: "{{ _linode.instance.ipv4[0] }}" + ansible_ssh_user: algo + ansible_ssh_port: "{{ ssh_port }}" + cloudinit: true diff --git a/roles/cloud-linode/tasks/prompts.yml b/roles/cloud-linode/tasks/prompts.yml new file mode 100644 index 0000000..84d85b9 --- /dev/null +++ b/roles/cloud-linode/tasks/prompts.yml @@ -0,0 +1,51 @@ +--- +- pause: + prompt: | + Enter your ACCESS token. 
(https://developers.linode.com/api/v4/#access-and-authentication): + echo: false + register: _linode_token + when: + - linode_token is undefined + - lookup('env','LINODE_API_TOKEN')|length <= 0 + +- name: Set the token as a fact + set_fact: + algo_linode_token: "{{ linode_token | default(_linode_token.user_input|default(None)) | default(lookup('env','LINODE_API_TOKEN'), true) }}" + +- name: Get regions + uri: + url: https://api.linode.com/v4/regions + method: GET + status_code: 200 + register: _linode_regions + +- name: Set facts about the regions + set_fact: + linode_regions: "{{ _linode_regions.json.data | sort(attribute='id') }}" + +- name: Set default region + set_fact: + default_region: >- + {% for r in linode_regions %} + {%- if r['id'] == "us-east" %}{{ loop.index }}{% endif %} + {%- endfor %} + +- pause: + prompt: | + What region should the server be located in? + {% for r in linode_regions %} + {{ loop.index }}. {{ r['id'] }} + {% endfor %} + + Enter the number of your desired region + [{{ default_region }}] + register: _algo_region + when: region is undefined + +- name: Set additional facts + set_fact: + algo_linode_region: >- + {% if region is defined %}{{ region }} + {%- elif _algo_region.user_input %}{{ linode_regions[_algo_region.user_input | int -1 ]['id'] }} + {%- else %}{{ linode_regions[default_region | int - 1]['id'] }}{% endif %} + public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}" diff --git a/roles/cloud-linode/tasks/venv.yml b/roles/cloud-linode/tasks/venv.yml new file mode 100644 index 0000000..ece831e --- /dev/null +++ b/roles/cloud-linode/tasks/venv.yml @@ -0,0 +1,7 @@ +--- +- name: Install requirements + pip: + name: + - linode_api4 + state: latest + virtualenv_python: python3 diff --git a/roles/cloud-openstack/tasks/main.yml b/roles/cloud-openstack/tasks/main.yml index e710def..ac6cbd3 100644 --- a/roles/cloud-openstack/tasks/main.yml +++ b/roles/cloud-openstack/tasks/main.yml @@ -7,14 +7,14 @@ import_tasks: venv.yml - name: 
Security group created - os_security_group: + openstack.cloud.security_group: state: "{{ state|default('present') }}" name: "{{ algo_server_name }}-security_group" description: AlgoVPN security group register: os_security_group - name: Security rules created - os_security_group_rule: + openstack.cloud.security_group_rule: state: "{{ state|default('present') }}" security_group: "{{ os_security_group.id }}" protocol: "{{ item.proto }}" @@ -22,29 +22,32 @@ port_range_max: "{{ item.port_max }}" remote_ip_prefix: "{{ item.range }}" with_items: - - { proto: tcp, port_min: '{{ ssh_port }}', port_max: '{{ ssh_port }}', range: 0.0.0.0/0 } + - { proto: tcp, port_min: "{{ ssh_port }}", port_max: "{{ ssh_port }}", range: 0.0.0.0/0 } - { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 } - { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 } - { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 } - { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 } - name: Gather facts about flavors - os_flavor_facts: + openstack.cloud.compute_flavor_info: ram: "{{ cloud_providers.openstack.flavor_ram }}" + register: os_flavor - name: Gather facts about images - os_image_facts: + openstack.cloud.image_info: + register: os_image - name: Set image as a fact set_fact: image_id: "{{ item.id }}" - loop: "{{ openstack_image }}" + loop: "{{ os_image.openstack_image }}" when: - item.name == cloud_providers.openstack.image - item.status == "active" - name: Gather facts about public networks - os_networks_facts: + openstack.cloud.networks_info: + register: os_network - name: Set the network as a fact set_fact: @@ -53,15 +56,15 @@ - item['router:external']|default(omit) - item['admin_state_up']|default(omit) - item['status'] == 'ACTIVE' - with_items: "{{ openstack_networks }}" + with_items: "{{ os_network.openstack_networks }}" - name: Set facts set_fact: - flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] 
}}" + flavor_id: "{{ (os_flavor.openstack_flavors | sort(attribute='ram'))[0]['id'] }}" security_group_name: "{{ os_security_group['secgroup']['name'] }}" - name: Server created - os_server: + openstack.cloud.server: state: "{{ state|default('present') }}" name: "{{ algo_server_name }}" image: "{{ image_id }}" diff --git a/roles/cloud-scaleway/tasks/main.yml b/roles/cloud-scaleway/tasks/main.yml index 3b290da..05c1d53 100644 --- a/roles/cloud-scaleway/tasks/main.yml +++ b/roles/cloud-scaleway/tasks/main.yml @@ -1,71 +1,74 @@ +--- - name: Include prompts import_tasks: prompts.yml - block: - - name: Gather Scaleway organizations facts - scaleway_organization_facts: + - name: Gather Scaleway organizations facts + scaleway_organization_info: + register: scaleway_org - - name: Get images - scaleway_image_facts: - region: "{{ algo_region }}" + - name: Get images + scaleway_image_info: + region: "{{ algo_region }}" + register: scaleway_image - - name: Set cloud specific facts - set_fact: - organization_id: "{{ scaleway_organization_facts[0]['id'] }}" - images: >- - [{% for i in scaleway_image_facts -%} - {% if i.name == cloud_providers.scaleway.image and - i.arch == cloud_providers.scaleway.arch -%} - '{{ i.id }}'{% if not loop.last %},{% endif %} - {%- endif -%} - {%- endfor -%}] + - name: Set cloud specific facts + set_fact: + organization_id: "{{ scaleway_org.scaleway_organization_info[0]['id'] }}" + images: >- + [{% for i in scaleway_image.scaleway_image_info -%} + {% if i.name == cloud_providers.scaleway.image and + i.arch == cloud_providers.scaleway.arch -%} + '{{ i.id }}'{% if not loop.last %},{% endif %} + {%- endif -%} + {%- endfor -%}] - - name: Create a server - scaleway_compute: - name: "{{ algo_server_name }}" - enable_ipv6: true - public_ip: dynamic - boot_type: local - state: present - image: "{{ images[0] }}" - organization: "{{ organization_id }}" - region: "{{ algo_region }}" - commercial_type: "{{ cloud_providers.scaleway.size }}" - wait: true - tags: - 
- Environment:Algo - - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} - register: scaleway_compute + - name: Create a server + scaleway_compute: + name: "{{ algo_server_name }}" + enable_ipv6: true + public_ip: dynamic + boot_type: local + state: present + image: "{{ images[0] }}" + organization: "{{ organization_id }}" + region: "{{ algo_region }}" + commercial_type: "{{ cloud_providers.scaleway.size }}" + wait: true + tags: + - Environment:Algo + - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} + register: scaleway_compute - - name: Patch the cloud-init - uri: - url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init" - method: PATCH - body: "{{ lookup('template', 'files/cloud-init/base.yml') }}" - status_code: 204 - headers: - Content-Type: "text/plain" - X-Auth-Token: "{{ algo_scaleway_token }}" + - name: Patch the cloud-init + uri: + url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init + method: PATCH + body: "{{ lookup('template', 'files/cloud-init/base.yml') }}" + status_code: 204 + headers: + Content-Type: text/plain + X-Auth-Token: "{{ algo_scaleway_token }}" - - name: Start the server - scaleway_compute: - name: "{{ algo_server_name }}" - enable_ipv6: true - public_ip: dynamic - boot_type: local - state: running - image: "{{ images[0] }}" - organization: "{{ organization_id }}" - region: "{{ algo_region }}" - commercial_type: "{{ cloud_providers.scaleway.size }}" - wait: true - tags: - - Environment:Algo - - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} - register: algo_instance - until: algo_instance.msg.public_ip - retries: 3 - delay: 3 + - name: Start the server + scaleway_compute: + name: "{{ algo_server_name }}" + enable_ipv6: true + public_ip: dynamic + boot_type: local + state: running + image: "{{ images[0] }}" + organization: "{{ organization_id }}" + 
region: "{{ algo_region }}" + commercial_type: "{{ cloud_providers.scaleway.size }}" + wait: true + tags: + - Environment:Algo + - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} + register: algo_instance + until: algo_instance.msg.public_ip + retries: 3 + delay: 3 environment: SCW_TOKEN: "{{ algo_scaleway_token }}" diff --git a/roles/cloud-vultr/tasks/main.yml b/roles/cloud-vultr/tasks/main.yml index ff34709..ccbcd13 100644 --- a/roles/cloud-vultr/tasks/main.yml +++ b/roles/cloud-vultr/tasks/main.yml @@ -3,56 +3,56 @@ import_tasks: prompts.yml - block: - - name: Creating a firewall group - vultr_firewall_group: - name: "{{ algo_server_name }}" + - name: Creating a firewall group + vultr.cloud.firewall_group: + name: "{{ algo_server_name }}" - - name: Creating firewall rules - vultr_firewall_rule: - group: "{{ algo_server_name }}" - protocol: "{{ item.protocol }}" - port: "{{ item.port }}" - ip_version: "{{ item.ip }}" - cidr: "{{ item.cidr }}" - with_items: - - { protocol: tcp, port: "{{ ssh_port }}", ip: v4, cidr: "0.0.0.0/0" } - - { protocol: tcp, port: "{{ ssh_port }}", ip: v6, cidr: "::/0" } - - { protocol: udp, port: 500, ip: v4, cidr: "0.0.0.0/0" } - - { protocol: udp, port: 500, ip: v6, cidr: "::/0" } - - { protocol: udp, port: 4500, ip: v4, cidr: "0.0.0.0/0" } - - { protocol: udp, port: 4500, ip: v6, cidr: "::/0" } - - { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: "0.0.0.0/0" } - - { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" } + - name: Creating firewall rules + vultr.cloud.firewall_rule: + group: "{{ algo_server_name }}" + protocol: "{{ item.protocol }}" + port: "{{ item.port }}" + ip_type: "{{ item.ip }}" + subnet: "{{ item.cidr.split('/')[0] }}" + subnet_size: "{{ item.cidr.split('/')[1] }}" + with_items: + - { protocol: tcp, port: "{{ ssh_port }}", ip: v4, cidr: 0.0.0.0/0 } + - { protocol: tcp, port: "{{ ssh_port }}", ip: v6, cidr: "::/0" } + - { protocol: udp, port: 500, ip: v4, cidr: 
0.0.0.0/0 } + - { protocol: udp, port: 500, ip: v6, cidr: "::/0" } + - { protocol: udp, port: 4500, ip: v4, cidr: 0.0.0.0/0 } + - { protocol: udp, port: 4500, ip: v6, cidr: "::/0" } + - { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: 0.0.0.0/0 } + - { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" } - - name: Upload the startup script - vultr_startup_script: - name: algo-startup - script: | - {{ lookup('template', 'files/cloud-init/base.sh') }} - mkdir -p /var/lib/cloud/data/ || true - touch /var/lib/cloud/data/result.json + - name: Upload the startup script + vultr.cloud.startup_script: + name: algo-startup + script: | + {{ lookup('template', 'files/cloud-init/base.yml') }} - - name: Creating a server - vultr_server: - name: "{{ algo_server_name }}" - startup_script: algo-startup - hostname: "{{ algo_server_name }}" - os: "{{ cloud_providers.vultr.os }}" - plan: "{{ cloud_providers.vultr.size }}" - region: "{{ algo_vultr_region }}" - firewall_group: "{{ algo_server_name }}" - state: started - tag: Environment:Algo - ipv6_enabled: true - auto_backup_enabled: false - notify_activate: false - register: vultr_server + - name: Creating a server + vultr.cloud.instance: + name: "{{ algo_server_name }}" + startup_script: algo-startup + hostname: "{{ algo_server_name }}" + os: "{{ cloud_providers.vultr.os }}" + plan: "{{ cloud_providers.vultr.size }}" + region: "{{ algo_vultr_region }}" + firewall_group: "{{ algo_server_name }}" + state: started + tags: + - Environment:Algo + enable_ipv6: true + backups: false + activation_email: false + register: vultr_server - - set_fact: - cloud_instance_ip: "{{ vultr_server.vultr_server.v4_main_ip }}" - ansible_ssh_user: algo - ansible_ssh_port: "{{ ssh_port }}" - cloudinit: true + - set_fact: + cloud_instance_ip: "{{ vultr_server.vultr_instance.main_ip }}" + ansible_ssh_user: algo + ansible_ssh_port: "{{ ssh_port }}" + cloudinit: true environment: - VULTR_API_CONFIG: "{{ algo_vultr_config }}" + 
VULTR_API_KEY: "{{ lookup('ini', 'key', section='default', file=algo_vultr_config) }}" diff --git a/roles/cloud-vultr/tasks/prompts.yml b/roles/cloud-vultr/tasks/prompts.yml index 1245b71..51e2ddd 100644 --- a/roles/cloud-vultr/tasks/prompts.yml +++ b/roles/cloud-vultr/tasks/prompts.yml @@ -42,7 +42,7 @@ What region should the server be located in? (https://www.vultr.com/locations/): {% for r in vultr_regions %} - {{ loop.index }}. {{ r['name'] }} + {{ loop.index }}. {{ r['name'] }} ({{ r['regioncode'] | lower }}) {% endfor %} Enter the number of your desired region @@ -54,5 +54,5 @@ set_fact: algo_vultr_region: >- {% if region is defined %}{{ region }} - {%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }} - {%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %} + {%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['regioncode'] | lower }} + {%- else %}{{ vultr_regions[default_region | int - 1]['regioncode'] | lower }}{% endif %} diff --git a/roles/common/handlers/main.yml b/roles/common/handlers/main.yml index 6bcae5c..f590f75 100644 --- a/roles/common/handlers/main.yml +++ b/roles/common/handlers/main.yml @@ -1,3 +1,4 @@ +--- - name: restart rsyslog service: name=rsyslog state=restarted @@ -13,6 +14,11 @@ state: restarted daemon_reload: true +- name: restart systemd-resolved + systemd: + name: systemd-resolved + state: restarted + - name: restart loopback bsd shell: > ifconfig lo100 destroy || true && diff --git a/roles/common/tasks/freebsd.yml b/roles/common/tasks/freebsd.yml index 9dbfb18..cb8361e 100644 --- a/roles/common/tasks/freebsd.yml +++ b/roles/common/tasks/freebsd.yml @@ -13,13 +13,12 @@ - name: Gather facts setup: - - name: Gather additional facts import_tasks: facts.yml - name: Set OS specific facts set_fact: - config_prefix: "/usr/local/" + config_prefix: /usr/local/ strongswan_shell: /usr/sbin/nologin strongswan_home: /var/empty root_group: wheel 
@@ -50,7 +49,7 @@ - name: Loopback included into the rc config blockinfile: dest: /etc/rc.conf - create: yes + create: true block: | cloned_interfaces="lo100" ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255" diff --git a/roles/common/tasks/iptables.yml b/roles/common/tasks/iptables.yml index e5b1061..463dc38 100644 --- a/roles/common/tasks/iptables.yml +++ b/roles/common/tasks/iptables.yml @@ -1,5 +1,4 @@ --- - - name: Iptables configured template: src: "{{ item.src }}" diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml index 0233096..2cfc6d7 100644 --- a/roles/common/tasks/main.yml +++ b/roles/common/tasks/main.yml @@ -6,6 +6,9 @@ tags: - update-users +- fail: + when: cloud_test|default(false)|bool + - include_tasks: ubuntu.yml when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout' tags: diff --git a/roles/common/tasks/ubuntu.yml b/roles/common/tasks/ubuntu.yml index 04e9cd8..15d2d91 100644 --- a/roles/common/tasks/ubuntu.yml +++ b/roles/common/tasks/ubuntu.yml @@ -1,7 +1,6 @@ --- - name: Gather facts setup: - - name: Cloud only tasks block: - name: Install software updates @@ -36,14 +35,23 @@ become: false when: algo_provider != "local" -- name: Include unatteded upgrades configuration +- name: Include unattended upgrades configuration import_tasks: unattended-upgrades.yml - name: Disable MOTD on login and SSHD replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}" with_items: - - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' } - - { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' } + - { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/login } + - { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/sshd } + +- name: Ensure fallback resolvers are set + ini_file: + path: /etc/systemd/resolved.conf + section: Resolve + option: 
FallbackDNS + value: "{{ dns_servers.ipv4 | join(' ') }}" + notify: + - restart systemd-resolved - name: Loopback for services configured template: @@ -66,7 +74,7 @@ - name: Check apparmor support command: apparmor_status - ignore_errors: yes + ignore_errors: true changed_when: false register: apparmor_status @@ -90,6 +98,7 @@ - cgroup-tools - openssl - gnupg2 + - cron sysctl: - item: net.ipv4.ip_forward value: 1 @@ -108,9 +117,9 @@ apt: name: - linux-headers-generic - - "linux-headers-{{ ansible_kernel }}" + - linux-headers-{{ ansible_kernel }} state: present - when: install_headers + when: install_headers | bool - name: Configure the alternative ingress ip include_tasks: aip/main.yml diff --git a/roles/common/templates/rules.v4.j2 b/roles/common/templates/rules.v4.j2 index a4209f7..c127bdc 100644 --- a/roles/common/templates/rules.v4.j2 +++ b/roles/common/templates/rules.v4.j2 @@ -95,7 +95,7 @@ COMMIT -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT # Drop SMB/CIFS traffic that requests to be forwarded -A FORWARD -p tcp --dport 445 -j {{ "DROP" if block_smb else "ACCEPT" }} -# Drop NETBIOS trafic that requests to be forwarded +# Drop NETBIOS traffic that requests to be forwarded -A FORWARD -p udp -m multiport --ports 137,138 -j {{ "DROP" if block_netbios else "ACCEPT" }} -A FORWARD -p tcp -m multiport --ports 137,139 -j {{ "DROP" if block_netbios else "ACCEPT" }} diff --git a/roles/dns/files/apparmor.profile.dnscrypt-proxy b/roles/dns/files/apparmor.profile.dnscrypt-proxy index 51de03f..ba5a70e 100644 --- a/roles/dns/files/apparmor.profile.dnscrypt-proxy +++ b/roles/dns/files/apparmor.profile.dnscrypt-proxy @@ -14,7 +14,7 @@ /etc/dnscrypt-proxy/** r, /usr/bin/dnscrypt-proxy mr, - /tmp/public-resolvers.md* rw, + /var/cache/{private/,}dnscrypt-proxy/** rw, /tmp/*.tmp w, owner /tmp/*.tmp r, diff --git a/roles/dns/tasks/freebsd.yml b/roles/dns/tasks/freebsd.yml index 0f826c4..e7e6297 100644 --- a/roles/dns/tasks/freebsd.yml +++ 
b/roles/dns/tasks/freebsd.yml @@ -6,4 +6,4 @@ - name: Enable mac_portacl lineinfile: path: /etc/rc.conf - line: 'dnscrypt_proxy_mac_portacl_enable="YES"' + line: dnscrypt_proxy_mac_portacl_enable="YES" diff --git a/roles/dns/tasks/ubuntu.yml b/roles/dns/tasks/ubuntu.yml index 3ccbdc1..b54e377 100644 --- a/roles/dns/tasks/ubuntu.yml +++ b/roles/dns/tasks/ubuntu.yml @@ -1,22 +1,22 @@ --- - block: - - name: Add the repository - apt_repository: - state: present - codename: "{{ ansible_distribution_release }}" - repo: ppa:shevchuk/dnscrypt-proxy - register: result - until: result is succeeded - retries: 10 - delay: 3 + - name: Add the repository + apt_repository: + state: present + codename: "{{ ansible_distribution_release }}" + repo: ppa:shevchuk/dnscrypt-proxy + register: result + until: result is succeeded + retries: 10 + delay: 3 - - name: Configure unattended-upgrades - copy: - src: 50-dnscrypt-proxy-unattended-upgrades - dest: /etc/apt/apt.conf.d/50-dnscrypt-proxy-unattended-upgrades - owner: root - group: root - mode: 0644 + - name: Configure unattended-upgrades + copy: + src: 50-dnscrypt-proxy-unattended-upgrades + dest: /etc/apt/apt.conf.d/50-dnscrypt-proxy-unattended-upgrades + owner: root + group: root + mode: 0644 when: ansible_facts['distribution_version'] is version('20.04', '<') - name: Install dnscrypt-proxy @@ -26,18 +26,18 @@ update_cache: true - block: - - name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy - copy: - src: apparmor.profile.dnscrypt-proxy - dest: /etc/apparmor.d/usr.bin.dnscrypt-proxy - owner: root - group: root - mode: 0600 - notify: restart dnscrypt-proxy + - name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy + copy: + src: apparmor.profile.dnscrypt-proxy + dest: /etc/apparmor.d/usr.bin.dnscrypt-proxy + owner: root + group: root + mode: 0600 + notify: restart dnscrypt-proxy - - name: Ubuntu | Enforce the dnscrypt-proxy AppArmor policy - command: aa-enforce usr.bin.dnscrypt-proxy - changed_when: false + - name: Ubuntu 
| Enforce the dnscrypt-proxy AppArmor policy + command: aa-enforce usr.bin.dnscrypt-proxy + changed_when: false tags: apparmor when: apparmor_enabled|default(false)|bool @@ -60,4 +60,4 @@ [Service] AmbientCapabilities=CAP_NET_BIND_SERVICE notify: - - restart dnscrypt-proxy + - restart dnscrypt-proxy diff --git a/roles/dns/templates/dnscrypt-proxy.toml.j2 b/roles/dns/templates/dnscrypt-proxy.toml.j2 index a51c7b7..0317001 100644 --- a/roles/dns/templates/dnscrypt-proxy.toml.j2 +++ b/roles/dns/templates/dnscrypt-proxy.toml.j2 @@ -118,11 +118,12 @@ timeout = 2500 keepalive = 30 -## Use the REFUSED return code for blocked responses -## Setting this to `false` means that some responses will be lies. -## Unfortunately, `false` appears to be required for Android 8+ +## Response for blocked queries. Options are `refused`, `hinfo` (default) or +## an IP response. To give an IP response, use the format `a:,aaaa:`. +## Using the `hinfo` option means that some responses will be lies. +## Unfortunately, the `hinfo` option appears to be required for Android 8+ -refused_code_in_responses = false +# blocked_query_response = 'refused' ## Load-balancing strategy: 'p2' (default), 'ph', 'first' or 'random' @@ -523,7 +524,7 @@ cache_neg_max_ttl = 600 [sources.'public-resolvers'] urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md'] - cache_file = '/tmp/public-resolvers.md' + cache_file = '/var/cache/dnscrypt-proxy/public-resolvers.md' minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3' prefix = '' @@ -550,5 +551,10 @@ cache_neg_max_ttl = 600 [static] +{% if custom_server_stamps %}{% for name, stamp in custom_server_stamps.items() %} + [static.'{{ name }}'] + stamp = '{{ stamp }}' +{%- endfor %}{% endif %} + # [static.'myserver'] # stamp = 'sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg' diff --git a/roles/local/tasks/prompts.yml 
b/roles/local/tasks/prompts.yml index fa085ec..76d2a4e 100644 --- a/roles/local/tasks/prompts.yml +++ b/roles/local/tasks/prompts.yml @@ -1,4 +1,16 @@ --- +- pause: + prompt: "{{ item }}" + when: not tests|default(false)|bool + tags: + - skip_ansible_lint + with_items: | + https://trailofbits.github.io/algo/deploy-to-ubuntu.html + + Local installation might break your server. Use at your own risk. + + Proceed? Press ENTER to continue or CTRL+C and A to abort... + - pause: prompt: | Enter the IP address of your server: (or use localhost for local installation): @@ -8,25 +20,25 @@ - name: Set the facts set_fact: - cloud_instance_ip: >- + cloud_instance_ip: >- {% if server is defined %}{{ server }} {%- elif _algo_server.user_input %}{{ _algo_server.user_input }} {%- else %}localhost{% endif %} - block: - - pause: - prompt: | - What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost) - [root] - register: _algo_ssh_user - when: ssh_user is undefined - - - name: Set the facts - set_fact: - ansible_ssh_user: >- - {% if ssh_user is defined %}{{ ssh_user }} - {%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }} - {%- else %}root{% endif %} + - pause: + prompt: | + What user should we use to login on the server? 
(note: passwordless login required, or ignore if you're deploying to localhost) + [root] + register: _algo_ssh_user + when: ssh_user is undefined + + - name: Set the facts + set_fact: + ansible_ssh_user: >- + {% if ssh_user is defined %}{{ ssh_user }} + {%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }} + {%- else %}root{% endif %} when: cloud_instance_ip != "localhost" - pause: diff --git a/roles/ssh_tunneling/defaults/main.yml b/roles/ssh_tunneling/defaults/main.yml index 3ed9b59..7c39943 100644 --- a/roles/ssh_tunneling/defaults/main.yml +++ b/roles/ssh_tunneling/defaults/main.yml @@ -1,2 +1,2 @@ --- -ssh_tunnels_config_path: "configs/{{ IP_subject_alt_name }}/ssh-tunnel/" +ssh_tunnels_config_path: configs/{{ IP_subject_alt_name }}/ssh-tunnel/ diff --git a/roles/ssh_tunneling/handlers/main.yml b/roles/ssh_tunneling/handlers/main.yml index 066d960..eae0ef1 100644 --- a/roles/ssh_tunneling/handlers/main.yml +++ b/roles/ssh_tunneling/handlers/main.yml @@ -1,2 +1,3 @@ +--- - name: restart ssh service: name="{{ ssh_service_name|default('ssh') }}" state=restarted diff --git a/roles/ssh_tunneling/tasks/main.yml b/roles/ssh_tunneling/tasks/main.yml index 2226bbe..8abdd62 100644 --- a/roles/ssh_tunneling/tasks/main.yml +++ b/roles/ssh_tunneling/tasks/main.yml @@ -2,7 +2,7 @@ - name: Ensure that the sshd_config file has desired options blockinfile: dest: /etc/ssh/sshd_config - marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role' + marker: "# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role" block: | Match Group algo AllowTcpForwarding local @@ -28,90 +28,90 @@ group: "{{ root_group|default('root') }}" - block: - - name: Ensure that the SSH users exist - user: - name: "{{ item }}" - group: algo - home: '/var/jail/{{ item }}' - createhome: yes - generate_ssh_key: false - shell: /bin/false - state: present - append: yes - with_items: "{{ users }}" + - name: Ensure that the SSH users exist + user: + name: "{{ item }}" + group: algo + home: /var/jail/{{ 
item }} + createhome: true + generate_ssh_key: false + shell: /bin/false + state: present + append: true + with_items: "{{ users }}" - - block: - - name: Clean up the ssh-tunnel directory - file: - dest: "{{ ssh_tunnels_config_path }}" - state: absent - when: keys_clean_all|bool + - block: + - name: Clean up the ssh-tunnel directory + file: + dest: "{{ ssh_tunnels_config_path }}" + state: absent + when: keys_clean_all|bool - - name: Ensure the config directories exist - file: - dest: "{{ ssh_tunnels_config_path }}" - state: directory - recurse: yes - mode: '0700' + - name: Ensure the config directories exist + file: + dest: "{{ ssh_tunnels_config_path }}" + state: directory + recurse: true + mode: "0700" - - name: Check if the private keys exist - stat: - path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem" - register: privatekey - with_items: "{{ users }}" + - name: Check if the private keys exist + stat: + path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem" + register: privatekey + with_items: "{{ users }}" - - name: Build ssh private keys - openssl_privatekey: - path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem" - passphrase: "{{ p12_export_password }}" - cipher: aes256 - force: false - no_log: true - when: not item.stat.exists - with_items: "{{ privatekey.results }}" - register: openssl_privatekey + - name: Build ssh private keys + openssl_privatekey: + path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem" + passphrase: "{{ p12_export_password }}" + cipher: auto + force: false + no_log: "{{ no_log|bool }}" + when: not item.stat.exists + with_items: "{{ privatekey.results }}" + register: openssl_privatekey - - name: Build ssh public keys - openssl_publickey: - path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub" - privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem" - privatekey_passphrase: "{{ p12_export_password }}" - format: OpenSSH - force: true - no_log: true - when: item.changed - with_items: "{{ 
openssl_privatekey.results }}" + - name: Build ssh public keys + openssl_publickey: + path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub" + privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem" + privatekey_passphrase: "{{ p12_export_password }}" + format: OpenSSH + force: true + no_log: "{{ no_log|bool }}" + when: item.changed + with_items: "{{ openssl_privatekey.results }}" - - name: Build the client ssh config - template: - src: ssh_config.j2 - dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config" - mode: 0700 - with_items: "{{ users }}" - delegate_to: localhost - become: false + - name: Build the client ssh config + template: + src: ssh_config.j2 + dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config" + mode: 0700 + with_items: "{{ users }}" + delegate_to: localhost + become: false - - name: The authorized keys file created - authorized_key: - user: "{{ item }}" - key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}" - state: present - manage_dir: true - exclusive: true - with_items: "{{ users }}" + - name: The authorized keys file created + authorized_key: + user: "{{ item }}" + key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}" + state: present + manage_dir: true + exclusive: true + with_items: "{{ users }}" - - name: Get active users - getent: - database: group - key: algo - split: ':' + - name: Get active users + getent: + database: group + key: algo + split: ":" - - name: Delete non-existing users - user: - name: "{{ item }}" - state: absent - remove: yes - force: yes - when: item not in users - with_items: "{{ getent_group['algo'][2].split(',') }}" + - name: Delete non-existing users + user: + name: "{{ item }}" + state: absent + remove: true + force: true + when: item not in users + with_items: "{{ getent_group['algo'][2].split(',') }}" tags: update-users diff --git a/roles/ssh_tunneling/templates/ssh_config.j2 b/roles/ssh_tunneling/templates/ssh_config.j2 index 
04931fc..54600b1 100644 --- a/roles/ssh_tunneling/templates/ssh_config.j2 +++ b/roles/ssh_tunneling/templates/ssh_config.j2 @@ -2,6 +2,7 @@ Host algo DynamicForward 127.0.0.1:1080 LogLevel quiet Compression yes + IdentitiesOnly yes IdentityFile {{ item }}.ssh.pem User {{ item }} Hostname {{ IP_subject_alt_name }} diff --git a/roles/strongswan/defaults/main.yml b/roles/strongswan/defaults/main.yml index dd50ddf..2483b3a 100644 --- a/roles/strongswan/defaults/main.yml +++ b/roles/strongswan/defaults/main.yml @@ -1,5 +1,5 @@ --- -ipsec_config_path: "configs/{{ IP_subject_alt_name }}/ipsec/" +ipsec_config_path: configs/{{ IP_subject_alt_name }}/ipsec/ ipsec_pki_path: "{{ ipsec_config_path }}/.pki/" strongswan_shell: /usr/sbin/nologin strongswan_home: /var/lib/strongswan @@ -7,7 +7,7 @@ strongswan_service: "{{ 'strongswan-starter' if ansible_facts['distribution_vers BetweenClients_DROP: true algo_ondemand_cellular: false algo_ondemand_wifi: false -algo_ondemand_wifi_exclude: '_null' +algo_ondemand_wifi_exclude: _null algo_dns_adblocking: false ipv6_support: false dns_encryption: true @@ -16,7 +16,7 @@ subjectAltName_type: "{{ 'DNS' if IP_subject_alt_name|regex_search('[a-z]') else subjectAltName: >- {{ subjectAltName_type }}:{{ IP_subject_alt_name }} {%- if ipv6_support -%},IP:{{ ansible_default_ipv6['address'] }}{%- endif -%} -subjectAltName_USER: "email:{{ item }}@{{ openssl_constraint_random_id }}" +subjectAltName_USER: email:{{ item }}@{{ openssl_constraint_random_id }} nameConstraints: >- critical,permitted;{{ subjectAltName_type }}:{{ IP_subject_alt_name }}{{- '/255.255.255.255' if subjectAltName_type == 'IP' else '' -}} {%- if subjectAltName_type == 'IP' -%} diff --git a/roles/strongswan/handlers/main.yml b/roles/strongswan/handlers/main.yml index a3d504f..3e2b035 100644 --- a/roles/strongswan/handlers/main.yml +++ b/roles/strongswan/handlers/main.yml @@ -1,3 +1,4 @@ +--- - name: restart strongswan service: name={{ strongswan_service }} state=restarted diff --git 
a/roles/strongswan/tasks/client_configs.yml b/roles/strongswan/tasks/client_configs.yml index 8798950..083e5d7 100644 --- a/roles/strongswan/tasks/client_configs.yml +++ b/roles/strongswan/tasks/client_configs.yml @@ -4,7 +4,7 @@ set -o pipefail cat private/{{ item }}.p12 | base64 - register: PayloadContent + register: PayloadContent changed_when: false args: executable: bash @@ -23,7 +23,7 @@ with_together: - "{{ users }}" - "{{ PayloadContent.results }}" - no_log: True + no_log: "{{ no_log|bool }}" - name: Build the client ipsec config file template: diff --git a/roles/strongswan/tasks/distribute_keys.yml b/roles/strongswan/tasks/distribute_keys.yml index 02df30b..55d1da1 100644 --- a/roles/strongswan/tasks/distribute_keys.yml +++ b/roles/strongswan/tasks/distribute_keys.yml @@ -1,5 +1,4 @@ --- - - name: Copy the keys to the strongswan directory copy: src: "{{ ipsec_pki_path }}/{{ item.src }}" @@ -8,18 +7,18 @@ group: "{{ item.group }}" mode: "{{ item.mode }}" with_items: - - src: "cacert.pem" - dest: "cacerts/ca.crt" + - src: cacert.pem + dest: cacerts/ca.crt owner: strongswan group: "{{ root_group|default('root') }}" mode: "0600" - - src: "certs/{{ IP_subject_alt_name }}.crt" - dest: "certs/{{ IP_subject_alt_name }}.crt" + - src: certs/{{ IP_subject_alt_name }}.crt + dest: certs/{{ IP_subject_alt_name }}.crt owner: strongswan group: "{{ root_group|default('root') }}" mode: "0600" - - src: "private/{{ IP_subject_alt_name }}.key" - dest: "private/{{ IP_subject_alt_name }}.key" + - src: private/{{ IP_subject_alt_name }}.key + dest: private/{{ IP_subject_alt_name }}.key owner: strongswan group: "{{ root_group|default('root') }}" mode: "0600" diff --git a/roles/strongswan/tasks/ipsec_configuration.yml b/roles/strongswan/tasks/ipsec_configuration.yml index d75a93c..7ba44c3 100644 --- a/roles/strongswan/tasks/ipsec_configuration.yml +++ b/roles/strongswan/tasks/ipsec_configuration.yml @@ -1,5 +1,4 @@ --- - - name: Setup the config files from our templates template: 
src: "{{ item.src }}" @@ -9,22 +8,22 @@ mode: "{{ item.mode }}" with_items: - src: strongswan.conf.j2 - dest: "strongswan.conf" + dest: strongswan.conf owner: root group: "{{ root_group|default('root') }}" mode: "0644" - src: ipsec.conf.j2 - dest: "ipsec.conf" + dest: ipsec.conf owner: root group: "{{ root_group|default('root') }}" mode: "0644" - src: ipsec.secrets.j2 - dest: "ipsec.secrets" + dest: ipsec.secrets owner: strongswan group: "{{ root_group|default('root') }}" mode: "0600" - src: charon.conf.j2 - dest: "strongswan.d/charon.conf" + dest: strongswan.d/charon.conf owner: root group: "{{ root_group|default('root') }}" mode: "0644" @@ -44,8 +43,8 @@ - name: Disable unneeded plugins lineinfile: dest: "{{ config_prefix|default('/') }}etc/strongswan.d/charon/{{ item }}.conf" - regexp: '.*load.*' - line: 'load = no' + regexp: .*load.* + line: load = no state: present notify: - restart strongswan diff --git a/roles/strongswan/tasks/main.yml b/roles/strongswan/tasks/main.yml index 1c4c267..37b4dcb 100644 --- a/roles/strongswan/tasks/main.yml +++ b/roles/strongswan/tasks/main.yml @@ -19,7 +19,7 @@ - import_tasks: distribute_keys.yml - import_tasks: client_configs.yml delegate_to: localhost - become: no + become: false tags: update-users - name: strongSwan started diff --git a/roles/strongswan/tasks/openssl.yml b/roles/strongswan/tasks/openssl.yml index c7e193f..f51ac9d 100644 --- a/roles/strongswan/tasks/openssl.yml +++ b/roles/strongswan/tasks/openssl.yml @@ -1,239 +1,239 @@ --- - block: - - debug: var=subjectAltName - - - name: Ensure the pki directory does not exist - file: - dest: "{{ ipsec_pki_path }}" - state: absent - when: keys_clean_all|bool - - - name: Ensure the pki directories exist - file: - dest: "{{ ipsec_pki_path }}/{{ item }}" - state: directory - recurse: yes - mode: '0700' - with_items: - - ecparams - - certs - - crl - - newcerts - - private - - public - - reqs - - - name: Ensure the config directories exist - file: - dest: "{{ ipsec_config_path 
}}/{{ item }}" - state: directory - recurse: yes - mode: '0700' - with_items: - - apple - - manual - - - name: Ensure the files exist - file: - dest: "{{ ipsec_pki_path }}/{{ item }}" - state: touch - with_items: - - ".rnd" - - "private/.rnd" - - "index.txt" - - "index.txt.attr" - - "serial" - - - name: Generate the openssl server configs - template: - src: openssl.cnf.j2 - dest: "{{ ipsec_pki_path }}/openssl.cnf" - - - name: Build the CA pair - shell: > - umask 077; - {{ openssl_bin }} ecparam -name secp384r1 -out ecparams/secp384r1.pem && - {{ openssl_bin }} req -utf8 -new - -newkey ec:ecparams/secp384r1.pem - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}")) - -keyout private/cakey.pem - -out cacert.pem -x509 -days 3650 - -batch - -passout pass:"{{ CA_password }}" && - touch {{ IP_subject_alt_name }}_ca_generated - args: - chdir: "{{ ipsec_pki_path }}" - creates: "{{ IP_subject_alt_name }}_ca_generated" - executable: bash - - - name: Copy the CA certificate - copy: - src: "{{ ipsec_pki_path }}/cacert.pem" - dest: "{{ ipsec_config_path }}/manual/cacert.pem" - - - name: Generate the serial number - shell: echo 01 > serial && touch serial_generated - args: - chdir: "{{ ipsec_pki_path }}" - creates: serial_generated - - - name: Build the server pair - shell: > - umask 077; - {{ openssl_bin }} req -utf8 -new - -newkey ec:ecparams/secp384r1.pem - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}")) - -keyout private/{{ IP_subject_alt_name }}.key - -out reqs/{{ IP_subject_alt_name }}.req -nodes - -passin pass:"{{ CA_password }}" - -subj "/CN={{ IP_subject_alt_name }}" -batch && - {{ openssl_bin }} ca -utf8 - -in reqs/{{ IP_subject_alt_name }}.req - -out certs/{{ IP_subject_alt_name }}.crt - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}")) - -days 3650 -batch - -passin pass:"{{ CA_password }}" - -subj "/CN={{ IP_subject_alt_name }}" && - touch certs/{{ 
IP_subject_alt_name }}_crt_generated - args: - chdir: "{{ ipsec_pki_path }}" - creates: certs/{{ IP_subject_alt_name }}_crt_generated - executable: bash - - - name: Build the client's pair - shell: > - umask 077; - {{ openssl_bin }} req -utf8 -new - -newkey ec:ecparams/secp384r1.pem - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}")) - -keyout private/{{ item }}.key - -out reqs/{{ item }}.req -nodes - -passin pass:"{{ CA_password }}" - -subj "/CN={{ item }}" -batch && - {{ openssl_bin }} ca -utf8 - -in reqs/{{ item }}.req - -out certs/{{ item }}.crt - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}")) - -days 3650 -batch - -passin pass:"{{ CA_password }}" - -subj "/CN={{ item }}" && - touch certs/{{ item }}_crt_generated - args: - chdir: "{{ ipsec_pki_path }}" - creates: certs/{{ item }}_crt_generated - executable: bash - with_items: "{{ users }}" - - - name: Build the tests pair - shell: > - umask 077; - {{ openssl_bin }} req -utf8 -new - -newkey ec:ecparams/secp384r1.pem - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com")) - -keyout private/google-algo-test-pair.com.key - -out reqs/google-algo-test-pair.com.req -nodes - -passin pass:"{{ CA_password }}" - -subj "/CN=google-algo-test-pair.com" -batch && - {{ openssl_bin }} ca -utf8 - -in reqs/google-algo-test-pair.com.req - -out certs/google-algo-test-pair.com.crt - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com")) - -days 3650 -batch - -passin pass:"{{ CA_password }}" - -subj "/CN=google-algo-test-pair.com" && - touch certs/google-algo-test-pair.com_crt_generated - args: - chdir: "{{ ipsec_pki_path }}" - creates: certs/google-algo-test-pair.com_crt_generated - executable: bash - when: tests|default(false)|bool - - - name: Build openssh public keys - openssl_publickey: - path: "{{ ipsec_pki_path }}/public/{{ item }}.pub" - 
privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key" - format: OpenSSH - with_items: "{{ users }}" - - - name: Build the client's p12 - shell: > - umask 077; - {{ openssl_bin }} pkcs12 - -in certs/{{ item }}.crt - -inkey private/{{ item }}.key - -export - -name {{ item }} - -out private/{{ item }}.p12 - -passout pass:"{{ p12_export_password }}" - args: - chdir: "{{ ipsec_pki_path }}" - executable: bash - with_items: "{{ users }}" - register: p12 - - - name: Build the client's p12 with the CA cert included - shell: > - umask 077; - {{ openssl_bin }} pkcs12 - -in certs/{{ item }}.crt - -inkey private/{{ item }}.key - -export - -name {{ item }} - -out private/{{ item }}_ca.p12 - -certfile cacert.pem - -passout pass:"{{ p12_export_password }}" - args: - chdir: "{{ ipsec_pki_path }}" - executable: bash - with_items: "{{ users }}" - register: p12 - - - name: Copy the p12 certificates - copy: - src: "{{ ipsec_pki_path }}/private/{{ item }}.p12" - dest: "{{ ipsec_config_path }}/manual/{{ item }}.p12" - with_items: - - "{{ users }}" - - - name: Get active users - shell: > - grep ^V index.txt | - grep -v "{{ IP_subject_alt_name }}" | - awk '{print $5}' | - sed 's/\/CN=//g' - args: - chdir: "{{ ipsec_pki_path }}" - register: valid_certs - - - name: Revoke non-existing users - shell: > - {{ openssl_bin }} ca -gencrl - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}")) - -passin pass:"{{ CA_password }}" - -revoke certs/{{ item }}.crt - -out crl/{{ item }}.crt - register: gencrl - args: - chdir: "{{ ipsec_pki_path }}" - creates: crl/{{ item }}.crt - executable: bash - when: item.split('@')[0] not in users - with_items: "{{ valid_certs.stdout_lines }}" - - - name: Genereate new CRL file - shell: > - {{ openssl_bin }} ca -gencrl - -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ IP_subject_alt_name }}")) - -passin pass:"{{ CA_password }}" - -out crl/algo.root.pem - when: - - gencrl is defined - - 
gencrl.changed - args: - chdir: "{{ ipsec_pki_path }}" - executable: bash + - debug: var=subjectAltName + + - name: Ensure the pki directory does not exist + file: + dest: "{{ ipsec_pki_path }}" + state: absent + when: keys_clean_all|bool + + - name: Ensure the pki directories exist + file: + dest: "{{ ipsec_pki_path }}/{{ item }}" + state: directory + recurse: true + mode: "0700" + with_items: + - ecparams + - certs + - crl + - newcerts + - private + - public + - reqs + + - name: Ensure the config directories exist + file: + dest: "{{ ipsec_config_path }}/{{ item }}" + state: directory + recurse: true + mode: "0700" + with_items: + - apple + - manual + + - name: Ensure the files exist + file: + dest: "{{ ipsec_pki_path }}/{{ item }}" + state: touch + with_items: + - .rnd + - private/.rnd + - index.txt + - index.txt.attr + - serial + + - name: Generate the openssl server configs + template: + src: openssl.cnf.j2 + dest: "{{ ipsec_pki_path }}/openssl.cnf" + + - name: Build the CA pair + shell: > + umask 077; + {{ openssl_bin }} ecparam -name secp384r1 -out ecparams/secp384r1.pem && + {{ openssl_bin }} req -utf8 -new + -newkey ec:ecparams/secp384r1.pem + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}")) + -keyout private/cakey.pem + -out cacert.pem -x509 -days 3650 + -batch + -passout pass:"{{ CA_password }}" && + touch {{ IP_subject_alt_name }}_ca_generated + args: + chdir: "{{ ipsec_pki_path }}" + creates: "{{ IP_subject_alt_name }}_ca_generated" + executable: bash + + - name: Copy the CA certificate + copy: + src: "{{ ipsec_pki_path }}/cacert.pem" + dest: "{{ ipsec_config_path }}/manual/cacert.pem" + + - name: Generate the serial number + shell: echo 01 > serial && touch serial_generated + args: + chdir: "{{ ipsec_pki_path }}" + creates: serial_generated + + - name: Build the server pair + shell: > + umask 077; + {{ openssl_bin }} req -utf8 -new + -newkey ec:ecparams/secp384r1.pem + -config <(cat openssl.cnf <(printf 
"[basic_exts]\nsubjectAltName={{ subjectAltName }}")) + -keyout private/{{ IP_subject_alt_name }}.key + -out reqs/{{ IP_subject_alt_name }}.req -nodes + -passin pass:"{{ CA_password }}" + -subj "/CN={{ IP_subject_alt_name }}" -batch && + {{ openssl_bin }} ca -utf8 + -in reqs/{{ IP_subject_alt_name }}.req + -out certs/{{ IP_subject_alt_name }}.crt + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName }}")) + -days 3650 -batch + -passin pass:"{{ CA_password }}" + -subj "/CN={{ IP_subject_alt_name }}" && + touch certs/{{ IP_subject_alt_name }}_crt_generated + args: + chdir: "{{ ipsec_pki_path }}" + creates: certs/{{ IP_subject_alt_name }}_crt_generated + executable: bash + + - name: Build the client's pair + shell: > + umask 077; + {{ openssl_bin }} req -utf8 -new + -newkey ec:ecparams/secp384r1.pem + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}")) + -keyout private/{{ item }}.key + -out reqs/{{ item }}.req -nodes + -passin pass:"{{ CA_password }}" + -subj "/CN={{ item }}" -batch && + {{ openssl_bin }} ca -utf8 + -in reqs/{{ item }}.req + -out certs/{{ item }}.crt + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}")) + -days 3650 -batch + -passin pass:"{{ CA_password }}" + -subj "/CN={{ item }}" && + touch certs/{{ item }}_crt_generated + args: + chdir: "{{ ipsec_pki_path }}" + creates: certs/{{ item }}_crt_generated + executable: bash + with_items: "{{ users }}" + + - name: Build the tests pair + shell: > + umask 077; + {{ openssl_bin }} req -utf8 -new + -newkey ec:ecparams/secp384r1.pem + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com")) + -keyout private/google-algo-test-pair.com.key + -out reqs/google-algo-test-pair.com.req -nodes + -passin pass:"{{ CA_password }}" + -subj "/CN=google-algo-test-pair.com" -batch && + {{ openssl_bin }} ca -utf8 + -in reqs/google-algo-test-pair.com.req + -out 
certs/google-algo-test-pair.com.crt + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:google-algo-test-pair.com")) + -days 3650 -batch + -passin pass:"{{ CA_password }}" + -subj "/CN=google-algo-test-pair.com" && + touch certs/google-algo-test-pair.com_crt_generated + args: + chdir: "{{ ipsec_pki_path }}" + creates: certs/google-algo-test-pair.com_crt_generated + executable: bash + when: tests|default(false)|bool + + - name: Build openssh public keys + openssl_publickey: + path: "{{ ipsec_pki_path }}/public/{{ item }}.pub" + privatekey_path: "{{ ipsec_pki_path }}/private/{{ item }}.key" + format: OpenSSH + with_items: "{{ users }}" + + - name: Build the client's p12 + shell: > + umask 077; + {{ openssl_bin }} pkcs12 + -in certs/{{ item }}.crt + -inkey private/{{ item }}.key + -export + -name {{ item }} + -out private/{{ item }}.p12 + -passout pass:"{{ p12_export_password }}" + args: + chdir: "{{ ipsec_pki_path }}" + executable: bash + with_items: "{{ users }}" + register: p12 + + - name: Build the client's p12 with the CA cert included + shell: > + umask 077; + {{ openssl_bin }} pkcs12 + -in certs/{{ item }}.crt + -inkey private/{{ item }}.key + -export + -name {{ item }} + -out private/{{ item }}_ca.p12 + -certfile cacert.pem + -passout pass:"{{ p12_export_password }}" + args: + chdir: "{{ ipsec_pki_path }}" + executable: bash + with_items: "{{ users }}" + register: p12 + + - name: Copy the p12 certificates + copy: + src: "{{ ipsec_pki_path }}/private/{{ item }}.p12" + dest: "{{ ipsec_config_path }}/manual/{{ item }}.p12" + with_items: + - "{{ users }}" + + - name: Get active users + shell: > + grep ^V index.txt | + grep -v "{{ IP_subject_alt_name }}" | + awk '{print $5}' | + sed 's/\/CN=//g' + args: + chdir: "{{ ipsec_pki_path }}" + register: valid_certs + + - name: Revoke non-existing users + shell: > + {{ openssl_bin }} ca -gencrl + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName={{ subjectAltName_USER }}")) + -passin 
pass:"{{ CA_password }}" + -revoke certs/{{ item }}.crt + -out crl/{{ item }}.crt + register: gencrl + args: + chdir: "{{ ipsec_pki_path }}" + creates: crl/{{ item }}.crt + executable: bash + when: item.split('@')[0] not in users + with_items: "{{ valid_certs.stdout_lines }}" + + - name: Generate new CRL file + shell: > + {{ openssl_bin }} ca -gencrl + -config <(cat openssl.cnf <(printf "[basic_exts]\nsubjectAltName=DNS:{{ IP_subject_alt_name }}")) + -passin pass:"{{ CA_password }}" + -out crl/algo.root.pem + when: + - gencrl is defined + - gencrl.changed + args: + chdir: "{{ ipsec_pki_path }}" + executable: bash delegate_to: localhost - become: no + become: false vars: ansible_python_interpreter: "{{ ansible_playbook_python }}" diff --git a/roles/strongswan/tasks/ubuntu.yml b/roles/strongswan/tasks/ubuntu.yml index 2510511..424352e 100644 --- a/roles/strongswan/tasks/ubuntu.yml +++ b/roles/strongswan/tasks/ubuntu.yml @@ -2,32 +2,31 @@ - name: Set OS specific facts set_fact: strongswan_additional_plugins: [] - - name: Ubuntu | Install strongSwan apt: name: strongswan state: present - update_cache: yes - install_recommends: yes + update_cache: true + install_recommends: true - block: - # https://bugs.launchpad.net/ubuntu/+source/strongswan/+bug/1826238 - - name: Ubuntu | Charon profile for apparmor configured - copy: - dest: /etc/apparmor.d/local/usr.lib.ipsec.charon - content: ' capability setpcap,' - owner: root - group: root - mode: 0644 - notify: restart strongswan + # https://bugs.launchpad.net/ubuntu/+source/strongswan/+bug/1826238 + - name: Ubuntu | Charon profile for apparmor configured + copy: + dest: /etc/apparmor.d/local/usr.lib.ipsec.charon + content: " capability setpcap," + owner: root + group: root + mode: "0644" + notify: restart strongswan - - name: Ubuntu | Enforcing ipsec with apparmor - command: aa-enforce "{{ item }}" - changed_when: false - with_items: - - /usr/lib/ipsec/charon - - /usr/lib/ipsec/lookip - - /usr/lib/ipsec/stroke + - name: Ubuntu 
| Enforcing ipsec with apparmor + command: aa-enforce "{{ item }}" + changed_when: false + with_items: + - /usr/lib/ipsec/charon + - /usr/lib/ipsec/lookip + - /usr/lib/ipsec/stroke tags: apparmor when: apparmor_enabled|default(false)|bool diff --git a/roles/strongswan/templates/openssl.cnf.j2 b/roles/strongswan/templates/openssl.cnf.j2 index fa22017..bd199b3 100644 --- a/roles/strongswan/templates/openssl.cnf.j2 +++ b/roles/strongswan/templates/openssl.cnf.j2 @@ -21,7 +21,7 @@ crl = $dir/crl.pem # The current CRL private_key = $dir/private/cakey.pem # The private key RANDFILE = $dir/private/.rand # private random number file -x509_extensions = basic_exts # The extentions to add to the cert +x509_extensions = basic_exts # The extensions to add to the cert # This allows a V2 CRL. Ancient browsers don't like it, but anything Easy-RSA # is designed for will. In return, we get the Issuer attached to CRLs. @@ -56,7 +56,7 @@ default_bits = 2048 default_keyfile = privkey.pem default_md = sha256 distinguished_name = cn_only -x509_extensions = easyrsa_ca # The extentions to add to the self signed cert +x509_extensions = easyrsa_ca # The extensions to add to the self signed cert # A placeholder to handle the $EXTRA_EXTS feature: #%EXTRA_EXTS% # Do NOT remove or change this line as $EXTRA_EXTS support requires it diff --git a/roles/wireguard/defaults/main.yml b/roles/wireguard/defaults/main.yml index 030511f..45c4029 100644 --- a/roles/wireguard/defaults/main.yml +++ b/roles/wireguard/defaults/main.yml @@ -1,6 +1,6 @@ --- wireguard_PersistentKeepalive: 0 -wireguard_config_path: "configs/{{ IP_subject_alt_name }}/wireguard/" +wireguard_config_path: configs/{{ IP_subject_alt_name }}/wireguard/ wireguard_pki_path: "{{ wireguard_config_path }}/.pki/" wireguard_interface: wg0 wireguard_port_avoid: 53 @@ -10,11 +10,12 @@ wireguard_dns_servers: >- {% if algo_dns_adblocking|default(false)|bool or dns_encryption|default(false)|bool %} {{ local_service_ip }}{{ ', ' + local_service_ipv6 
if ipv6_support else '' }} {% else %} - {% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %} + {% for host in dns_servers.ipv4 %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}{% if ipv6_support %},{% for host in dns_servers.ipv6 %}{{ host }}{% if + not loop.last %},{% endif %}{% endfor %}{% endif %} {% endif %} wireguard_client_ip: >- - {{ wireguard_network_ipv4 | ipaddr(index|int+2) }} - {{ ',' + wireguard_network_ipv6 | ipaddr(index|int+2) if ipv6_support else '' }} + {{ wireguard_network_ipv4 | ipmath(index|int+2) }} + {{ ',' + wireguard_network_ipv6 | ipmath(index|int+2) if ipv6_support else '' }} wireguard_server_ip: >- {{ wireguard_network_ipv4 | ipaddr('1') }} {{ ',' + wireguard_network_ipv6 | ipaddr('1') if ipv6_support else '' }} diff --git a/roles/wireguard/tasks/keys.yml b/roles/wireguard/tasks/keys.yml index 8c9bbdc..e9ce8a3 100644 --- a/roles/wireguard/tasks/keys.yml +++ b/roles/wireguard/tasks/keys.yml @@ -18,24 +18,24 @@ - "{{ IP_subject_alt_name }}" - block: - - name: Save private keys - copy: - dest: "{{ wireguard_pki_path }}/private/{{ item['item'] }}" - content: "{{ item['stdout'] }}" - mode: "0600" - no_log: true - when: item.changed - with_items: "{{ wg_genkey['results'] }}" - delegate_to: localhost - become: false + - name: Save private keys + copy: + dest: "{{ wireguard_pki_path }}/private/{{ item['item'] }}" + content: "{{ item['stdout'] }}" + mode: "0600" + no_log: "{{ no_log|bool }}" + when: item.changed + with_items: "{{ wg_genkey['results'] }}" + delegate_to: localhost + become: false - - name: Touch the lock file - file: - dest: "{{ config_prefix|default('/') }}etc/wireguard/private_{{ item }}.lock" - state: touch - with_items: - - "{{ users }}" - - "{{ IP_subject_alt_name }}" + - name: Touch the lock file + file: + dest: "{{ config_prefix|default('/') 
}}etc/wireguard/private_{{ item }}.lock" + state: touch + with_items: + - "{{ users }}" + - "{{ IP_subject_alt_name }}" when: wg_genkey.changed - name: Delete the preshared lock files @@ -57,24 +57,24 @@ - "{{ IP_subject_alt_name }}" - block: - - name: Save preshared keys - copy: - dest: "{{ wireguard_pki_path }}/preshared/{{ item['item'] }}" - content: "{{ item['stdout'] }}" - mode: "0600" - no_log: true - when: item.changed - with_items: "{{ wg_genpsk['results'] }}" - delegate_to: localhost - become: false + - name: Save preshared keys + copy: + dest: "{{ wireguard_pki_path }}/preshared/{{ item['item'] }}" + content: "{{ item['stdout'] }}" + mode: "0600" + no_log: "{{ no_log|bool }}" + when: item.changed + with_items: "{{ wg_genpsk['results'] }}" + delegate_to: localhost + become: false - - name: Touch the preshared lock file - file: - dest: "{{ config_prefix|default('/') }}etc/wireguard/preshared_{{ item }}.lock" - state: touch - with_items: - - "{{ users }}" - - "{{ IP_subject_alt_name }}" + - name: Touch the preshared lock file + file: + dest: "{{ config_prefix|default('/') }}etc/wireguard/preshared_{{ item }}.lock" + state: touch + with_items: + - "{{ users }}" + - "{{ IP_subject_alt_name }}" when: wg_genpsk.changed - name: Generate public keys @@ -95,7 +95,7 @@ dest: "{{ wireguard_pki_path }}/public/{{ item['item'] }}" content: "{{ item['stdout'] }}" mode: "0600" - no_log: true + no_log: "{{ no_log|bool }}" with_items: "{{ wg_pubkey['results'] }}" delegate_to: localhost become: false diff --git a/roles/wireguard/tasks/main.yml b/roles/wireguard/tasks/main.yml index 7e1fbc1..4b65a0a 100644 --- a/roles/wireguard/tasks/main.yml +++ b/roles/wireguard/tasks/main.yml @@ -28,61 +28,61 @@ tags: update-users - block: - - block: - - name: WireGuard user list updated - lineinfile: - dest: "{{ wireguard_pki_path }}/index.txt" - create: true - mode: "0600" - insertafter: EOF - line: "{{ item }}" - register: lineinfile - with_items: "{{ users }}" + - block: + - name: 
WireGuard user list updated + lineinfile: + dest: "{{ wireguard_pki_path }}/index.txt" + create: true + mode: "0600" + insertafter: EOF + line: "{{ item }}" + register: lineinfile + with_items: "{{ users }}" - - set_fact: - wireguard_users: "{{ (lookup('file', wireguard_pki_path + 'index.txt')).split('\n') }}" + - set_fact: + wireguard_users: "{{ (lookup('file', wireguard_pki_path + 'index.txt')).split('\n') }}" - - name: WireGuard users config generated - template: - src: client.conf.j2 - dest: "{{ wireguard_config_path }}/{{ item.1 }}.conf" - mode: "0600" - with_indexed_items: "{{ wireguard_users }}" - when: item.1 in users - vars: - index: "{{ item.0 }}" + - name: WireGuard users config generated + template: + src: client.conf.j2 + dest: "{{ wireguard_config_path }}/{{ item.1 }}.conf" + mode: "0600" + with_indexed_items: "{{ wireguard_users }}" + when: item.1 in users + vars: + index: "{{ item.0 }}" - - include_tasks: mobileconfig.yml - loop: - - ios - - macos - loop_control: - loop_var: system + - include_tasks: mobileconfig.yml + loop: + - ios + - macos + loop_control: + loop_var: system - - name: Generate QR codes - shell: > - umask 077; - which segno && - segno --scale=5 --output={{ item.1 }}.png \ - "{{ lookup('template', 'client.conf.j2') }}" || true - changed_when: false - with_indexed_items: "{{ wireguard_users }}" - when: item.1 in users - vars: - index: "{{ item.0 }}" - ansible_python_interpreter: "{{ ansible_playbook_python }}" - args: - chdir: "{{ wireguard_config_path }}" - executable: bash - become: false - delegate_to: localhost + - name: Generate QR codes + shell: > + umask 077; + which segno && + segno --scale=5 --output={{ item.1 }}.png \ + "{{ lookup('template', 'client.conf.j2') }}" || true + changed_when: false + with_indexed_items: "{{ wireguard_users }}" + when: item.1 in users + vars: + index: "{{ item.0 }}" + ansible_python_interpreter: "{{ ansible_playbook_python }}" + args: + chdir: "{{ wireguard_config_path }}" + executable: bash + 
become: false + delegate_to: localhost - - name: WireGuard configured - template: - src: server.conf.j2 - dest: "{{ config_prefix|default('/') }}etc/wireguard/{{ wireguard_interface }}.conf" - mode: "0600" - notify: restart wireguard + - name: WireGuard configured + template: + src: server.conf.j2 + dest: "{{ config_prefix|default('/') }}etc/wireguard/{{ wireguard_interface }}.conf" + mode: "0600" + notify: restart wireguard tags: update-users - name: WireGuard enabled and started diff --git a/roles/wireguard/tasks/mobileconfig.yml b/roles/wireguard/tasks/mobileconfig.yml index 0e192b4..d12be4a 100644 --- a/roles/wireguard/tasks/mobileconfig.yml +++ b/roles/wireguard/tasks/mobileconfig.yml @@ -4,7 +4,7 @@ src: mobileconfig.j2 dest: "{{ wireguard_config_path }}/apple/{{ system }}/{{ item.1 }}.mobileconfig" mode: "0600" - with_indexed_items: "{{ wireguard_users }}" + with_indexed_items: "{{ wireguard_users }}" when: item.1 in users vars: index: "{{ item.0 }}" diff --git a/roles/wireguard/tasks/ubuntu.yml b/roles/wireguard/tasks/ubuntu.yml index 700cbd9..8682bb5 100644 --- a/roles/wireguard/tasks/ubuntu.yml +++ b/roles/wireguard/tasks/ubuntu.yml @@ -7,5 +7,5 @@ - name: Set OS specific facts set_fact: - service_name: "wg-quick@{{ wireguard_interface }}" + service_name: wg-quick@{{ wireguard_interface }} tags: always diff --git a/roles/wireguard/templates/server.conf.j2 b/roles/wireguard/templates/server.conf.j2 index 0104f5f..1baad83 100644 --- a/roles/wireguard/templates/server.conf.j2 +++ b/roles/wireguard/templates/server.conf.j2 @@ -12,6 +12,6 @@ SaveConfig = false # {{ u }} PublicKey = {{ lookup('file', wireguard_pki_path + '/public/' + u) }} PresharedKey = {{ lookup('file', wireguard_pki_path + '/preshared/' + u) }} -AllowedIPs = {{ wireguard_network_ipv4 | ipaddr(index|int+1) | ipv4('address') }}/32{{ ',' + wireguard_network_ipv6 | ipaddr(index|int+1) | ipv6('address') + '/128' if ipv6_support else '' }} +AllowedIPs = {{ wireguard_network_ipv4 | 
ipmath(index|int+1) | ipv4('address') }}/32{{ ',' + wireguard_network_ipv6 | ipmath(index|int+1) | ipv6('address') + '/128' if ipv6_support else '' }} {% endif %} {% endfor %} diff --git a/server.yml b/server.yml index 54551eb..d1828ea 100644 --- a/server.yml +++ b/server.yml @@ -7,117 +7,117 @@ - config.cfg tasks: - block: - - name: Wait until the cloud-init completed - wait_for: - path: /var/lib/cloud/data/result.json - delay: 10 - timeout: 600 - state: present - become: false - when: cloudinit + - name: Wait until the cloud-init completed + wait_for: + path: /var/lib/cloud/data/result.json + delay: 10 + timeout: 600 + state: present + become: false + when: cloudinit - - block: - - name: Ensure the config directory exists - file: - dest: "configs/{{ IP_subject_alt_name }}" - state: directory - mode: "0700" + - block: + - name: Ensure the config directory exists + file: + dest: configs/{{ IP_subject_alt_name }} + state: directory + mode: "0700" - - name: Dump the ssh config - copy: - dest: "configs/{{ IP_subject_alt_name }}/ssh_config" - mode: "0600" - content: | - Host {{ IP_subject_alt_name }} {{ algo_server_name }} - HostName {{ IP_subject_alt_name }} - User {{ ansible_ssh_user }} - Port {{ ansible_ssh_port }} - IdentityFile {{ SSH_keys.private | realpath }} - KeepAlive yes - ServerAliveInterval 30 - when: inventory_hostname != 'localhost' - become: false - delegate_to: localhost + - name: Dump the ssh config + copy: + dest: configs/{{ IP_subject_alt_name }}/ssh_config + mode: "0600" + content: | + Host {{ IP_subject_alt_name }} {{ algo_server_name }} + HostName {{ IP_subject_alt_name }} + User {{ ansible_ssh_user }} + Port {{ ansible_ssh_port }} + IdentitiesOnly yes + IdentityFile {{ SSH_keys.private | realpath }} + KeepAlive yes + ServerAliveInterval 30 + when: inventory_hostname != 'localhost' + become: false + delegate_to: localhost - - import_role: - name: common - tags: common + - import_role: + name: common + tags: common - - import_role: - name: dns - 
when: - - algo_dns_adblocking or - dns_encryption - tags: dns + - import_role: + name: dns + when: + - algo_dns_adblocking or dns_encryption + tags: dns - - import_role: - name: wireguard - when: wireguard_enabled - tags: wireguard + - import_role: + name: wireguard + when: wireguard_enabled + tags: wireguard - - import_role: - name: strongswan - when: ipsec_enabled - tags: ipsec + - import_role: + name: strongswan + when: ipsec_enabled + tags: ipsec - - import_role: - name: ssh_tunneling - when: algo_ssh_tunneling - tags: ssh_tunneling + - import_role: + name: ssh_tunneling + when: algo_ssh_tunneling + tags: ssh_tunneling - - block: - - name: Dump the configuration - copy: - dest: "configs/{{ IP_subject_alt_name }}/.config.yml" - content: | - server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }} - server_user: {{ ansible_ssh_user }} - ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}" - {% if algo_provider != "local" %} - ansible_ssh_private_key_file: {{ SSH_keys.private }} - {% endif %} - algo_provider: {{ algo_provider }} - algo_server_name: {{ algo_server_name }} - algo_ondemand_cellular: {{ algo_ondemand_cellular }} - algo_ondemand_wifi: {{ algo_ondemand_wifi }} - algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }} - algo_dns_adblocking: {{ algo_dns_adblocking }} - algo_ssh_tunneling: {{ algo_ssh_tunneling }} - algo_store_pki: {{ algo_store_pki }} - IP_subject_alt_name: {{ IP_subject_alt_name }} - ipsec_enabled: {{ ipsec_enabled }} - wireguard_enabled: {{ wireguard_enabled }} - {% if tests|default(false)|bool %} - ca_password: '{{ CA_password }}' - p12_password: '{{ p12_export_password }}' - {% endif %} - become: false - delegate_to: localhost + - block: + - name: Dump the configuration + copy: + dest: configs/{{ IP_subject_alt_name }}/.config.yml + content: | + server: {{ 'localhost' if inventory_hostname == 'localhost' else inventory_hostname }} + server_user: {{ ansible_ssh_user }} + ansible_ssh_port: "{{ 
ansible_ssh_port|default(22) }}" + {% if algo_provider != "local" %} + ansible_ssh_private_key_file: {{ SSH_keys.private }} + {% endif %} + algo_provider: {{ algo_provider }} + algo_server_name: {{ algo_server_name }} + algo_ondemand_cellular: {{ algo_ondemand_cellular }} + algo_ondemand_wifi: {{ algo_ondemand_wifi }} + algo_ondemand_wifi_exclude: {{ algo_ondemand_wifi_exclude }} + algo_dns_adblocking: {{ algo_dns_adblocking }} + algo_ssh_tunneling: {{ algo_ssh_tunneling }} + algo_store_pki: {{ algo_store_pki }} + IP_subject_alt_name: {{ IP_subject_alt_name }} + ipsec_enabled: {{ ipsec_enabled }} + wireguard_enabled: {{ wireguard_enabled }} + {% if tests|default(false)|bool %} + ca_password: '{{ CA_password }}' + p12_password: '{{ p12_export_password }}' + {% endif %} + become: false + delegate_to: localhost - - name: Create a symlink if deploying to localhost - file: - src: "{{ IP_subject_alt_name }}" - dest: configs/localhost - state: link - force: true - when: inventory_hostname == 'localhost' + - name: Create a symlink if deploying to localhost + file: + src: "{{ IP_subject_alt_name }}" + dest: configs/localhost + state: link + force: true + when: inventory_hostname == 'localhost' - - name: Import tmpfs tasks - import_tasks: playbooks/tmpfs/umount.yml - become: false - delegate_to: localhost - vars: - facts: "{{ hostvars['localhost'] }}" - when: - - pki_in_tmpfs - - not algo_store_pki + - name: Import tmpfs tasks + import_tasks: playbooks/tmpfs/umount.yml + become: false + delegate_to: localhost + vars: + facts: "{{ hostvars['localhost'] }}" + when: + - pki_in_tmpfs + - not algo_store_pki - - debug: - msg: - - "{{ congrats.common.split('\n') }}" - - " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}" - - " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}" - - " {{ congrats.ssh_access if algo_provider != 'local' else ''}}" - tags: always + - debug: + msg: + - "{{ congrats.common.split('\n') }}" + - " {{ 
congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}" + - " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}" + - " {{ congrats.ssh_access if algo_provider != 'local' else '' }}" + tags: always rescue: - include_tasks: playbooks/rescue.yml diff --git a/tests/algo.conf b/tests/algo.conf deleted file mode 100644 index a93d420..0000000 --- a/tests/algo.conf +++ /dev/null @@ -1 +0,0 @@ -dhcp-host=algo,10.0.8.100 diff --git a/tests/ca-password-fix.sh b/tests/ca-password-fix.sh index 427fed6..43a9c9c 100644 --- a/tests/ca-password-fix.sh +++ b/tests/ca-password-fix.sh @@ -4,7 +4,7 @@ set -ex -DEPLOY_ARGS="provider=local server=10.0.8.100 ssh_user=ubuntu endpoint=10.0.8.100 ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test dns_adblocking=true ssh_tunneling=true store_pki=true install_headers=false tests=true local_service_ip=172.16.0.1" +DEPLOY_ARGS="provider=local server=10.0.8.100 ssh_user=ubuntu endpoint=10.0.8.100 ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test dns_adblocking=true ssh_tunneling=true store_pki=true install_headers=false tests=true local_service_ip=172.16.0.1 no_log=false" CA_PASSWORD="test123" diff --git a/tests/local-deploy.sh b/tests/local-deploy.sh index e3acc6c..6c7df69 100755 --- a/tests/local-deploy.sh +++ b/tests/local-deploy.sh @@ -2,7 +2,7 @@ set -ex -DEPLOY_ARGS="provider=local server=10.0.8.100 ssh_user=ubuntu endpoint=10.0.8.100 ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test dns_adblocking=true ssh_tunneling=true store_pki=true install_headers=false tests=true local_service_ip=172.16.0.1" +DEPLOY_ARGS="provider=local server=10.0.8.100 ssh_user=ubuntu endpoint=10.0.8.100 ondemand_cellular=true ondemand_wifi=true ondemand_wifi_exclude=test dns_adblocking=true ssh_tunneling=true store_pki=true install_headers=false tests=true local_service_ip=172.16.0.1 no_log=false" if [ "${DEPLOY}" == "docker" ] then diff --git a/tests/lxd-bridge 
b/tests/lxd-bridge deleted file mode 100644 index ddc59d2..0000000 --- a/tests/lxd-bridge +++ /dev/null @@ -1,16 +0,0 @@ -USE_LXD_BRIDGE="true" -LXD_BRIDGE="lxdbr0" -UPDATE_PROFILE="true" -LXD_CONFILE="/etc/default/algo.conf" -LXD_DOMAIN="lxd" -LXD_IPV4_ADDR="10.0.8.1" -LXD_IPV4_NETMASK="255.255.255.0" -LXD_IPV4_NETWORK="10.0.8.0/24" -LXD_IPV4_DHCP_RANGE="10.0.8.2,10.0.8.254" -LXD_IPV4_DHCP_MAX="250" -LXD_IPV4_NAT="true" -LXD_IPV6_ADDR="" -LXD_IPV6_MASK="" -LXD_IPV6_NETWORK="" -LXD_IPV6_NAT="false" -LXD_IPV6_PROXY="false" diff --git a/tests/pre-deploy.sh b/tests/pre-deploy.sh index 84a0db6..c26164e 100755 --- a/tests/pre-deploy.sh +++ b/tests/pre-deploy.sh @@ -4,10 +4,6 @@ set -euxo pipefail sysctl net.ipv6.conf.all.disable_ipv6=0 -tar xf $HOME/lxc/cache.tar -C / || echo "Didn't extract cache." -cp -f tests/lxd-bridge /etc/default/lxd-bridge -cp -f tests/algo.conf /etc/default/algo.conf - export REPOSITORY=${REPOSITORY:-${GITHUB_REPOSITORY}} export _BRANCH=${BRANCH#refs/heads/} export BRANCH=${_BRANCH:-${GITHUB_REF#refs/heads/}} @@ -18,16 +14,16 @@ else echo -e "#cloud-config\nssh_authorized_keys:\n - $(cat ~/.ssh/id_rsa.pub)" | lxc profile set default user.user-data - fi -systemctl restart lxd-bridge.service lxd-containers.service lxd.service +lxc network set lxdbr0 ipv4.address 10.0.8.1/24 -lxc profile set default raw.lxc lxc.aa_profile=unconfined +lxc profile set default raw.lxc 'lxc.apparmor.profile = unconfined' lxc profile set default security.privileged true lxc profile show default -lxc launch ubuntu:${UBUNTU_VERSION} algo -if [[ ${UBUNTU_VERSION} == "20.04" ]]; then - lxc exec algo -- apt remove snapd --purge -y || true -fi +lxc init ubuntu:${UBUNTU_VERSION} algo +lxc network attach lxdbr0 algo eth0 eth0 +lxc config device set algo eth0 ipv4.address 10.0.8.100 +lxc start algo ip addr @@ -35,4 +31,13 @@ until dig A +short algo.lxd @10.0.8.1 | grep -vE '^$' > /dev/null; do sleep 3 done +case ${UBUNTU_VERSION} in + 20.04|22.04) + lxc exec algo -- apt remove 
snapd --purge -y || true + ;; + 18.04) + lxc exec algo -- apt install python3.8 -y + ;; +esac + lxc list diff --git a/tests/update-users.sh b/tests/update-users.sh index baa8c82..c34cd0c 100755 --- a/tests/update-users.sh +++ b/tests/update-users.sh @@ -2,7 +2,7 @@ set -ex -USER_ARGS="{ 'server': '10.0.8.100', 'users': ['desktop', 'user1', 'user2'], 'local_service_ip': '172.16.0.1' }" +USER_ARGS="{ 'server': '10.0.8.100', 'users': ['desktop', 'user1', 'user2'], 'local_service_ip': '172.16.0.1', 'no_log': false }" if [ "${DEPLOY}" == "docker" ] then diff --git a/users.yml b/users.yml index 479c28c..e9e8c08 100644 --- a/users.yml +++ b/users.yml @@ -1,6 +1,6 @@ --- - hosts: localhost - gather_facts: False + gather_facts: false tags: always vars_files: - config.cfg @@ -13,7 +13,7 @@ depth: 2 recurse: true hidden: true - patterns: ".config.yml" + patterns: .config.yml register: _configs_list - name: Verify servers @@ -50,23 +50,23 @@ - name: Import host specific variables include_vars: - file: "configs/{{ algo_server }}/.config.yml" + file: configs/{{ algo_server }}/.config.yml - when: ipsec_enabled block: - - name: CA password prompt - pause: - prompt: Enter the password for the private CA key - echo: false - register: _ca_password - when: ca_password is undefined - - - name: Set facts based on the input - set_fact: - CA_password: >- - {% if ca_password is defined %}{{ ca_password }} - {%- elif _ca_password.user_input %}{{ _ca_password.user_input }} - {%- else %}omit{% endif %} + - name: CA password prompt + pause: + prompt: Enter the password for the private CA key + echo: false + register: _ca_password + when: ca_password is undefined + + - name: Set facts based on the input + set_fact: + CA_password: >- + {% if ca_password is defined %}{{ ca_password }} + {%- elif _ca_password.user_input %}{{ _ca_password.user_input }} + {%- else %}omit{% endif %} - name: Local pre-tasks import_tasks: playbooks/cloud-pre.yml @@ -78,7 +78,7 @@ groups: vpn-host ansible_ssh_user: "{{ 
server_user|default('root') }}" ansible_connection: "{% if algo_server == 'localhost' %}local{% else %}ssh{% endif %}" - ansible_python_interpreter: "/usr/bin/python3" + ansible_python_interpreter: /usr/bin/python3 CA_password: "{{ CA_password|default(omit) }}" rescue: - include_tasks: playbooks/rescue.yml @@ -89,32 +89,32 @@ become: true vars_files: - config.cfg - - "configs/{{ inventory_hostname }}/.config.yml" + - configs/{{ inventory_hostname }}/.config.yml tasks: - block: - - import_role: - name: common - - - import_role: - name: wireguard - when: wireguard_enabled - - - import_role: - name: strongswan - when: ipsec_enabled - tags: ipsec - - - import_role: - name: ssh_tunneling - when: algo_ssh_tunneling - - - debug: - msg: - - "{{ congrats.common.split('\n') }}" - - " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}" - - " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}" - - " {{ congrats.ssh_access if algo_provider != 'local' else ''}}" - tags: always + - import_role: + name: common + + - import_role: + name: wireguard + when: wireguard_enabled + + - import_role: + name: strongswan + when: ipsec_enabled + tags: ipsec + + - import_role: + name: ssh_tunneling + when: algo_ssh_tunneling + + - debug: + msg: + - "{{ congrats.common.split('\n') }}" + - " {{ congrats.p12_pass if algo_ssh_tunneling or ipsec_enabled else '' }}" + - " {{ congrats.ca_key_pass if algo_store_pki and ipsec_enabled else '' }}" + - " {{ congrats.ssh_access if algo_provider != 'local' else ''}}" + tags: always rescue: - include_tasks: playbooks/rescue.yml