Merge branch 'master' into master

pull/14714/head
Lorgio Antonio Jimenez committed 3 months ago (via GitHub)
commit 90aa093bef

@@ -1,3 +1,10 @@
 skip_list:
+  - yaml
   - '204'
 verbosity: 1
+warn_list:
+  - no-changed-when
+  - no-handler
+  - fqcn-builtins
+  - var-spacing

@@ -12,3 +12,7 @@ docs
 .env
 logo.png
 tests
+CHANGELOG.md
+PULL_REQUEST_TEMPLATE.md
+Vagrantfile
+Makefile

@@ -0,0 +1,13 @@
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
  # Maintain dependencies for Python
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"

@@ -0,0 +1,44 @@
name: Create and publish a Docker image

on:
  push:
    branches: ['master']

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            # set latest tag for master branch
            type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
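For reference, once this workflow has run on `master`, the resulting image can be pulled from GitHub Container Registry and used the same way the Docker docs later in this diff describe. A minimal sketch (the `latest` tag and the `/data` mount point follow the usage shown in `algo-docker.sh` and the Docker deployment doc below; the local `configs` path is an arbitrary example):

```bash
# Pull the image published by the workflow above.
docker pull ghcr.io/trailofbits/algo:latest

# Run it interactively, mounting a local directory to receive the generated configs.
docker run --cap-drop=all -it -v "$(pwd)/configs":/data ghcr.io/trailofbits/algo:latest
```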

@@ -4,14 +4,17 @@ on: [push, pull_request]
 jobs:
   lint:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v2.3.2
         with:
-          python-version: '3.7'
+          python-version: '3.11'
+          cache: 'pip'
       - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
         run: |
           sudo apt update -y
           python -m pip install --upgrade pip
@@ -23,46 +26,40 @@ jobs:
         run: |
           /snap/bin/shellcheck algo install.sh
           ansible-playbook main.yml --syntax-check
-          ansible-lint -v *.yml roles/{local,cloud-*}/*/*.yml
+          ansible-lint -x experimental,package-latest,unnamed-task -v *.yml roles/{local,cloud-*}/*/*.yml || true
   scripted-deploy:
-    runs-on: ubuntu-16.04
+    runs-on: ubuntu-20.04
     strategy:
       matrix:
-        UBUNTU_VERSION: ["18.04", "20.04"]
+        UBUNTU_VERSION: ["22.04"]
     steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v2.3.2
         with:
-          python-version: '3.7'
+          python-version: '3.11'
+          cache: 'pip'
       - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
         run: |
          sudo apt update -y
          sudo apt install -y \
-            python3-pip \
-            lxd \
-            expect-dev \
-            debootstrap \
-            tree \
-            bridge-utils \
-            dnsutils \
-            build-essential \
-            libssl-dev \
-            libffi-dev \
-            python3-dev \
-            linux-headers-$(uname -r) \
            wireguard \
            libxml2-utils \
            crudini \
            fping \
            strongswan \
            libstrongswan-standard-plugins \
-            resolvconf
+            openresolv
          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements.txt
+          sudo snap refresh lxd
+          sudo lxd init --auto
       - name: Provision
         env:
           DEPLOY: cloud-init
@@ -76,12 +73,14 @@ jobs:
       - name: Deployment
         run: |
+          set -x
          until sudo lxc exec algo -- test -f /var/log/cloud-init-output.log; do echo 'Log file not found, Sleep for 3 seconds'; sleep 3; done
          ( sudo lxc exec algo -- tail -f /var/log/cloud-init-output.log & )
          until sudo lxc exec algo -- test -f /var/lib/cloud/data/result.json; do
            echo 'Cloud init is not finished. Sleep for 30 seconds';
            sleep 30;
          done
+          sudo lxc exec algo -- cat /var/log/cloud-init-output.log
          sudo lxc exec algo -- test -f /opt/algo/configs/localhost/.config.yml
          sudo lxc exec algo -- tar zcf /root/algo-configs.tar -C /opt/algo/configs/ .
          sudo lxc file pull algo/root/algo-configs.tar ./
@@ -93,46 +92,39 @@ jobs:
          sudo -E bash -x ./tests/wireguard-client.sh
          sudo env "PATH=$PATH" ./tests/ipsec-client.sh
-  local-deploy:
-    runs-on: ubuntu-16.04
+  docker-deploy:
+    runs-on: ubuntu-20.04
     strategy:
       matrix:
-        UBUNTU_VERSION: ["18.04", "20.04"]
+        UBUNTU_VERSION: ["22.04"]
     steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v2.3.2
         with:
-          python-version: '3.7'
+          python-version: '3.11'
+          cache: 'pip'
       - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
         run: |
          set -x
-          sudo add-apt-repository -yu ppa:ubuntu-lxc/stable
          sudo apt update -y
          sudo apt install -y \
-            python3-pip \
-            lxd \
-            expect-dev \
-            debootstrap \
-            tree \
-            bridge-utils \
-            dnsutils \
-            build-essential \
-            libssl-dev \
-            libffi-dev \
-            python3-dev \
-            linux-headers-$(uname -r) \
            wireguard \
            libxml2-utils \
            crudini \
            fping \
            strongswan \
            libstrongswan-standard-plugins \
-            resolvconf
+            openresolv
          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements.txt
+          sudo snap refresh lxd
+          sudo lxd init --auto
       - name: Provision
         env:
           DEPLOY: docker

.gitignore

@@ -7,3 +7,4 @@ inventory_users
 .DS_Store
 venvs/*
 !venvs/.gitinit
+.vagrant

@@ -0,0 +1 @@
* @jackivanov

@@ -1,8 +1,7 @@
-FROM python:3-alpine
+FROM python:3.11-alpine
 ARG VERSION="git"
-ARG PACKAGES="bash libffi openssh-client openssl rsync tini"
-ARG BUILD_PACKAGES="gcc libffi-dev linux-headers make musl-dev openssl-dev"
+ARG PACKAGES="bash libffi openssh-client openssl rsync tini gcc libffi-dev linux-headers make musl-dev openssl-dev rust cargo"
 LABEL name="algo" \
       version="${VERSION}" \
@@ -15,13 +14,11 @@ RUN mkdir -p /algo && mkdir -p /algo/configs
 WORKDIR /algo
 COPY requirements.txt .
-RUN apk --no-cache add ${BUILD_PACKAGES} && \
-    python3 -m pip --no-cache-dir install -U pip && \
+RUN python3 -m pip --no-cache-dir install -U pip && \
     python3 -m pip --no-cache-dir install virtualenv && \
     python3 -m virtualenv .env && \
     source .env/bin/activate && \
-    python3 -m pip --no-cache-dir install -r requirements.txt && \
-    apk del ${BUILD_PACKAGES}
+    python3 -m pip --no-cache-dir install -r requirements.txt
 COPY . .
 RUN chmod 0755 /algo/algo-docker.sh
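For local testing, the updated Dockerfile can be built directly from the repository root; a sketch, where the `local/algo` tag is an arbitrary choice rather than anything defined by the project:

```bash
# Build the image from the repository root (the same build context the publish workflow uses).
docker build -t local/algo .

# Run it the same way as the published image, mounting a directory for the generated configs.
docker run --cap-drop=all -it -v "$(pwd)/configs":/data local/algo
```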

@@ -1,6 +1,5 @@
 # Algo VPN
-[![Join the chat at https://gitter.im/trailofbits/algo](https://badges.gitter.im/trailofbits/algo.svg)](https://gitter.im/trailofbits/algo?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/fold_left.svg?style=social&label=Follow%20%40AlgoVPN)](https://twitter.com/AlgoVPN)
 [![](https://github.com/trailofbits/algo/workflows/Main/badge.svg?branch=master)](https://github.com/trailofbits/algo/actions)
@@ -16,7 +15,7 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireG
 * Blocks ads with a local DNS resolver (optional)
 * Sets up limited SSH users for tunneling traffic (optional)
 * Based on current versions of Ubuntu and strongSwan
-* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md)
+* Installs to DigitalOcean, Amazon Lightsail, Amazon EC2, Vultr, Microsoft Azure, Google Compute Engine, Scaleway, OpenStack, CloudStack, Hetzner Cloud, Linode, or [your own Ubuntu server (for more advanced users)](docs/deploy-to-ubuntu.md)
 ## Anti-features
@@ -30,45 +29,42 @@ Algo VPN is a set of Ansible scripts that simplify the setup of a personal WireG
 The easiest way to get an Algo server running is to run it on your local system or from [Google Cloud Shell](docs/deploy-from-cloudshell.md) and let it set up a _new_ virtual machine in the cloud for you.
-1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/) or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/).
+1. **Setup an account on a cloud hosting provider.** Algo supports [DigitalOcean](https://m.do.co/c/4d7f4ff9cfe4) (most user friendly), [Amazon Lightsail](https://aws.amazon.com/lightsail/), [Amazon EC2](https://aws.amazon.com/), [Vultr](https://www.vultr.com/), [Microsoft Azure](https://azure.microsoft.com/), [Google Compute Engine](https://cloud.google.com/compute/), [Scaleway](https://www.scaleway.com/), [DreamCompute](https://www.dreamhost.com/cloud/computing/), [Linode](https://www.linode.com), or other OpenStack-based cloud hosting, [Exoscale](https://www.exoscale.com) or other CloudStack-based cloud hosting, or [Hetzner Cloud](https://www.hetzner.com/).
 2. **Get a copy of Algo.** The Algo scripts will be installed on your local system. There are two ways to get a copy:
    - Download the [ZIP file](https://github.com/trailofbits/algo/archive/master.zip). Unzip the file to create a directory named `algo-master` containing the Algo scripts.
-   - Run the command `git clone https://github.com/trailofbits/algo.git` to create a directory named `algo` containing the Algo scripts.
+   - Use `git clone` to create a directory named `algo` containing the Algo scripts:
+     ```bash
+     git clone https://github.com/trailofbits/algo.git
+     ```
-3. **Install Algo's core dependencies.** Algo requires that **Python 3.6 or later** and at least one supporting package are installed on your system.
+3. **Install Algo's core dependencies.** Algo requires that **Python 3.10 or later** and at least one supporting package are installed on your system.
-   - **macOS:** Apple does not provide a suitable version of Python 3 with macOS. Here are two ways to obtain one:
-     * Use the [Homebrew](https://brew.sh) package manager. After installing Homebrew install Python 3 by running `brew install python3`.
-     * Download and install the latest stable [Python package](https://www.python.org/downloads/mac-osx/). Be sure to run the included *Install Certificates* command from Finder.
-     See [Deploy from macOS](docs/deploy-from-macos.md) for more detailed information on installing Python 3 on macOS.
-     Once Python 3 is installed on your Mac, from Terminal run:
+   - **macOS:** Catalina (10.15) and higher includes Python 3 as part of the optional Command Line Developer Tools package. From Terminal run:
     ```bash
-    python3 -m pip install --upgrade virtualenv
+    python3 -m pip install --user --upgrade virtualenv
     ```
+     If prompted, install the Command Line Developer Tools and re-run the above command.
+     For macOS versions prior to Catalina, see [Deploy from macOS](docs/deploy-from-macos.md) for information on installing Python 3.
-   - **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. Make sure your system is up-to-date and install the supporting package(s):
+   - **Linux:** Recent releases of Ubuntu, Debian, and Fedora come with Python 3 already installed. If your Python version is not 3.10, then you will need to use pyenv to install Python 3.10. Make sure your system is up-to-date and install the supporting package(s):
     * Ubuntu and Debian:
       ```bash
-      sudo apt install -y python3-virtualenv
+      sudo apt install -y --no-install-recommends python3-virtualenv file lookup
       ```
+      On a Raspberry Pi running Ubuntu also install `libffi-dev` and `libssl-dev`.
     * Fedora:
       ```bash
       sudo dnf install -y python3-virtualenv
       ```
-    * Red Hat and CentOS 7 and later (for earlier versions see this [documentation](docs/deploy-from-redhat-centos6.md)):
-      ```bash
-      sudo yum -y install epel-release
-      sudo yum -y install python36-virtualenv
-      ```
-   - **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md).
+   - **Windows:** Use the Windows Subsystem for Linux (WSL) to create your own copy of Ubuntu running under Windows from which to install and run Algo. See the [Windows documentation](docs/deploy-from-windows.md) for more information.
 4. **Install Algo's remaining dependencies.** You'll need to run these commands from the Algo directory each time you download a new copy of Algo. In a Terminal window `cd` into the `algo-master` (ZIP file) or `algo` (`git clone`) directory and run:
    ```bash
@@ -77,11 +73,12 @@ The easiest way to get an Algo server running is to run it on your local system
    python3 -m pip install -U pip virtualenv &&
    python3 -m pip install -r requirements.txt
    ```
-   On Fedora add the option `--system-site-packages` to the first command above. On macOS install the C compiler if prompted.
+   On Fedora first run `export TMPDIR=/var/tmp`, then add the option `--system-site-packages` to the first command above (after `python3 -m virtualenv`). On macOS install the C compiler if prompted.
-5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN. If you want to be able to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
+5. **Set your configuration options.** Open the file `config.cfg` in your favorite text editor. Specify the users you wish to create in the `users` list. Create a unique user for each device you plan to connect to your VPN.
+   > Note: [IKEv2 Only] If you want to add or delete users later, you **must** select `yes` at the `Do you want to retain the keys (PKI)?` prompt during the server deployment. You should also review the other options before deployment, as changing your mind about them later [may require you to deploy a brand new server](https://github.com/trailofbits/algo/blob/master/docs/faq.md#i-deployed-an-algo-server-can-you-update-it-with-new-features).
-6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available. None are required for a fully functional VPN server. These optional features are described in greater detail in [here](docs/deploy-from-ansible.md).
+6. **Start the deployment.** Return to your terminal. In the Algo directory, run `./algo` and follow the instructions. There are several optional features available, none of which are required for a fully functional VPN server. These optional features are described in greater detail [here](docs/deploy-from-ansible.md).
 That's it! You will get the message below when the server deployment process completes. Take note of the p12 (user certificate) password and the CA key in case you need them later, **they will only be displayed this time**.
@@ -135,6 +132,10 @@ WireGuard works great with Linux clients. See [this page](docs/client-linux-wire
 Please see [this page](docs/client-linux-ipsec.md).
+### OpenWrt Wireguard Clients
+Please see [this page](docs/client-openwrt-router-wireguard.md).
 ### Other Devices
 Depending on the platform, you may need one or multiple of the following files.
@@ -207,7 +208,6 @@ After this process completes, the Algo VPN server will contain only the users li
 * Deploy from [macOS](docs/deploy-from-macos.md)
 * Deploy from [Windows](docs/deploy-from-windows.md)
 * Deploy from [Google Cloud Shell](docs/deploy-from-cloudshell.md)
-* Deploy from [RedHat/CentOS 6.x](docs/deploy-from-redhat-centos6.md)
 * Deploy from a [Docker container](docs/deploy-from-docker.md)
 ### Setup VPN Clients to Connect to the Server
@@ -225,7 +225,7 @@ After this process completes, the Algo VPN server will contain only the users li
 * Deploy to an [unsupported cloud provider](docs/deploy-to-unsupported-cloud.md)
 * Deploy to your own [FreeBSD](docs/deploy-to-freebsd.md) server
-If you've read all the documentation and have further questions, [join the chat on Gitter](https://gitter.im/trailofbits/algo).
+If you've read all the documentation and have further questions, [create a new discussion](https://github.com/trailofbits/algo/discussions).
 ## Endorsements

@@ -0,0 +1,9 @@
# Reporting Security Issues
The Algo team and community take security bugs in Algo seriously. We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/trailofbits/algo/security/) tab.
The Algo team will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
Report security bugs in third-party modules to the person or team maintaining the module.

Vagrantfile

@@ -0,0 +1,36 @@
Vagrant.configure("2") do |config|
  config.vm.box = "bento/ubuntu-20.04"

  config.vm.provider "virtualbox" do |v|
    v.name = "algo-20.04"
    v.memory = "512"
    v.cpus = "1"
  end

  config.vm.synced_folder "./", "/opt/algo", create: true

  config.vm.provision "ansible_local" do |ansible|
    ansible.playbook = "/opt/algo/main.yml"
    # https://github.com/hashicorp/vagrant/issues/12204
    ansible.pip_install_cmd = "sudo apt-get install -y python3-pip python-is-python3 && sudo ln -s -f /usr/bin/pip3 /usr/bin/pip"
    ansible.install_mode = "pip_args_only"
    ansible.pip_args = "-r /opt/algo/requirements.txt"
    ansible.inventory_path = "/opt/algo/inventory"
    ansible.limit = "local"
    ansible.verbose = "-vvvv"
    ansible.extra_vars = {
      provider: "local",
      server: "localhost",
      ssh_user: "",
      endpoint: "127.0.0.1",
      ondemand_cellular: true,
      ondemand_wifi: false,
      dns_adblocking: true,
      ssh_tunneling: true,
      store_pki: true,
      tests: true,
      no_log: false
    }
  end
end
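With this Vagrantfile in the repository root, a local test deployment can be driven entirely by Vagrant; a minimal sketch, assuming Vagrant and VirtualBox are installed:

```bash
# Bring up the Ubuntu 20.04 box and run the ansible_local provisioner defined above.
vagrant up

# Re-run provisioning after changing extra_vars, or tear the VM down when finished.
vagrant provision
vagrant destroy -f
```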

@@ -11,7 +11,7 @@ usage() {
    retcode="${1:-0}"
    echo "To run algo from Docker:"
    echo ""
-   echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" trailofbits/algo:latest"
+   echo "docker run --cap-drop=all -it -v <path to configurations>:"${DATA_DIR}" ghcr.io/trailofbits/algo:latest"
    echo ""
    exit ${retcode}
}

@@ -8,14 +8,14 @@
  tasks:
    - block:
        - name: Local pre-tasks
          import_tasks: playbooks/cloud-pre.yml

        - name: Include a provisioning role
          include_role:
            name: "{{ 'local' if algo_provider == 'local' else 'cloud-' + algo_provider }}"

        - name: Local post-tasks
          import_tasks: playbooks/cloud-post.yml
      rescue:
        - include_tasks: playbooks/rescue.yml

@@ -1,10 +1,10 @@
 ---
 # This is the list of users to generate.
-# Every device must have a unique username.
-# You can generate up to 250 users at one time.
-# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
-# Emails are not allowed
+# Every device must have a unique user.
+# You can add up to 65,534 new users over the lifetime of an AlgoVPN.
+# User names with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
+# Email addresses are not allowed.
 users:
   - avidor.turkewitz
   - bob.nadler
@@ -103,10 +103,17 @@ dnscrypt_servers:
   ipv4:
     - cloudflare
     # - google
-    # - <YourCustomServer>
+    # E.g., if using NextDNS, this will be something like NextDNS-abc123.
+    # You must also fill in custom_server_stamps below. You may specify
+    # multiple custom servers.
   ipv6:
     - cloudflare-ipv6
+custom_server_stamps:
+#  YourCustomServer: 'sdns://...'
 # DNS servers which will be used if 'dns_encryption' is 'false'.
+# Fallback resolvers for systemd-resolved
 # The default is to use Cloudflare.
 dns_servers:
   ipv4:
@@ -129,7 +136,7 @@ strongswan_log_level: 2
 # rightsourceip for ipsec
 # ipv4
-strongswan_network: 10.19.48.0/24
+strongswan_network: 10.48.0.0/16
 # ipv6
 strongswan_network_ipv6: '2001:db8:4160::/48'
@@ -139,13 +146,15 @@ strongswan_network_ipv6: '2001:db8:4160::/48'
 wireguard_PersistentKeepalive: 0
 # WireGuard network configuration
-wireguard_network_ipv4: 10.19.49.0/24
+wireguard_network_ipv4: 10.49.0.0/16
 wireguard_network_ipv6: 2001:db8:a160::/48
 # Randomly generated IP address for the local dns resolver
 local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
 local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
+# Hide sensitive data
+no_log: true
 congrats:
   common: |
@@ -171,14 +180,21 @@ SSH_keys:
 cloud_providers:
   azure:
     size: Standard_B1S
+    osDisk:
+      # The storage account type to use for the OS disk. Possible values:
+      # 'Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS',
+      # 'Premium_ZRS', 'StandardSSD_ZRS', 'PremiumV2_LRS'.
+      type: Standard_LRS
     image:
       publisher: Canonical
-      offer: 0001-com-ubuntu-server-focal-daily
-      sku: 20_04-daily-lts
+      offer: 0001-com-ubuntu-minimal-jammy-daily
+      sku: minimal-22_04-daily-lts
       version: latest
   digitalocean:
+    # See docs for extended droplet options, pricing, and availability.
+    # Possible values: 's-1vcpu-512mb-10gb', 's-1vcpu-1gb', ...
     size: s-1vcpu-1gb
-    image: "ubuntu-20-04-x64"
+    image: "ubuntu-22-04-x64"
   ec2:
     # Change the encrypted flag to "false" to disable AWS volume encryption.
     encrypted: true
@@ -187,32 +203,39 @@ cloud_providers:
     use_existing_eip: false
     size: t2.micro
     image:
-      name: "ubuntu-focal-20.04"
+      name: "ubuntu-jammy-22.04"
+      arch: x86_64
       owner: "099720109477"
+    # Change instance_market_type from "on-demand" to "spot" to launch a spot
+    # instance. See deploy-from-ansible.md for spot's additional IAM permission
+    instance_market_type: on-demand
   gce:
-    size: f1-micro
-    image: ubuntu-2004-lts
+    size: e2-micro
+    image: ubuntu-2204-lts
     external_static_ip: false
   lightsail:
-    size: nano_1_0
-    image: ubuntu_18_04
+    size: nano_2_0
+    image: ubuntu_22_04
   scaleway:
     size: DEV1-S
-    image: Ubuntu 20.04 Focal Fossa
+    image: Ubuntu 22.04 Jammy Jellyfish
     arch: x86_64
   hetzner:
     server_type: cx11
-    image: ubuntu-20.04
+    image: ubuntu-22.04
   openstack:
     flavor_ram: ">=512"
-    image: Ubuntu-18.04
+    image: Ubuntu-22.04
   cloudstack:
     size: Micro
-    image: Linux Ubuntu 20.04 LTS 64-bit
+    image: Linux Ubuntu 22.04 LTS 64-bit
     disk: 10
   vultr:
-    os: Ubuntu 20.04 x64
-    size: 1024 MB RAM,25 GB SSD,1.00 TB BW
+    os: Ubuntu 22.04 LTS x64
+    size: vc2-1c-1gb
+  linode:
+    type: g6-nanode-1
+    image: linode/ubuntu22.04
   local:
 fail_hint:

@@ -13,7 +13,7 @@
        ansible_ssh_user: "{{ 'root' if client_ip == 'localhost' else ssh_user }}"
        vpn_user: "{{ vpn_user }}"
        IP_subject_alt_name: "{{ server_ip }}"
-        ansible_python_interpreter: "/usr/bin/python3"
+        ansible_python_interpreter: /usr/bin/python3
 - name: Configure the client and install required software
   hosts: client-host

@@ -5,8 +5,8 @@ Install strongSwan, then copy the included ipsec_user.conf, ipsec_user.secrets,
 ## Ubuntu Server example
 1. `sudo apt-get install strongswan libstrongswan-standard-plugins`: install strongSwan
-2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/manual/<name>.crt`
-3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/manual/<name>.key`
+2. `/etc/ipsec.d/certs`: copy `<name>.crt` from `algo-master/configs/<server_ip>/ipsec/.pki/certs/<name>.crt`
+3. `/etc/ipsec.d/private`: copy `<name>.key` from `algo-master/configs/<server_ip>/ipsec/.pki/private/<name>.key`
 4. `/etc/ipsec.d/cacerts`: copy `cacert.pem` from `algo-master/configs/<server_ip>/ipsec/manual/cacert.pem`
 5. `/etc/ipsec.secrets`: add your `user.key` to the list, e.g. `<server_ip> : ECDSA <name>.key`
 6. `/etc/ipsec.conf`: add the connection from `ipsec_user.conf` and ensure `leftcert` matches the `<name>.crt` filename
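Taken together, steps 2-4 above amount to copying three files into the strongSwan directories; a sketch using the same `<server_ip>` and `<name>` placeholders as the list (substitute your actual values before running):

```bash
# Copy the client certificate, private key, and CA certificate into place.
sudo cp algo-master/configs/<server_ip>/ipsec/.pki/certs/<name>.crt /etc/ipsec.d/certs/
sudo cp algo-master/configs/<server_ip>/ipsec/.pki/private/<name>.key /etc/ipsec.d/private/
sudo cp algo-master/configs/<server_ip>/ipsec/manual/cacert.pem /etc/ipsec.d/cacerts/
```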

@@ -0,0 +1,88 @@
# Using Router with OpenWRT as a Client with WireGuard
This scenario is useful if you want to use the VPN with devices that have no VPN capability, such as a smart TV, or to make the VPN connection available to multiple devices through a router.
This is a tested, working scenario with the following environment:
- Algo installed on Ubuntu at DigitalOcean
- client-side router "TP-Link TL-WR1043ND" with OpenWrt ver. 21.02.1. [OpenWrt install instructions](https://openwrt.org/toh/tp-link/tl-wr1043nd)
- or client-side router "TP-Link Archer C20i AC750" with OpenWrt ver. 21.02.1. [OpenWrt install instructions](https://openwrt.org/toh/tp-link/archer_c20i)
See the compatible device list at https://openwrt.org/toh/start. Theoretically, any device on the list should work.
## Router setup
Make sure that you have
- a router with OpenWrt installed,
- a router that is connected to the internet,
- a router and the device in front of it that do not share the same IP. By default OpenWrt uses 192.168.1.1; if so, change it to something like 192.168.2.1.
### Install required packages (WebUI)
- Open the router web UI (usually http://192.168.1.1)
- Log in (by default username: root, password: <empty>)
- System -> Software, click "Update lists"
- Install the following packages: wireguard-tools, kmod-wireguard, luci-app-wireguard, wireguard, kmod-crypto-sha256, kmod-crypto-sha1, kmod-crypto-md5
- Restart the router
### Alternative: install required packages (SSH)
- Open the router web UI (usually http://192.168.1.1)
- ssh root@192.168.1.1
- opkg update
- opkg install wireguard-tools kmod-wireguard luci-app-wireguard wireguard kmod-crypto-sha256 kmod-crypto-sha1 kmod-crypto-md5
- reboot
### Create an Interface (WebUI)
- Open router web UI
- Navigate Network -> Interface
- Click "Add new interface"
- Give a Name. e.g. `AlgoVpn`
- Select Protocol. `Wireguard VPN`
- click `Create Interface`
- In *General Settings* tab
- `Bring up on boot` *checked*
- Private key: `Interface -> Private Key` from algo config file
- Ip Address: `Interface -> Address` from algo config file
- In *Peers* tab
- Click add
- Name `algo`
- Public key: `[Peer]->PublicKey` from algo config file
- Preshared key: `[Peer]->PresharedKey` from algo config file
- Allowed IPs: 0.0.0.0/0
- Route Allowed IPs: checked
- Endpoint Host: `[Peer]->Endpoint` ip from algo config file
- Endpoint Port: `[Peer]->Endpoint` port from algo config file
- Persistent Keep Alive: `25`
- Click Save & Save Apply
### Configure Firewall (WebUI)
- Open router web UI
- Navigate to Network -> Firewall
- Click `Add configuration`:
- Name: e.g. ivpn_fw
- Input: Reject
- Output: Accept
- Forward: Reject
- Masquerading: Checked
- MSS clamping: Checked
- Covered networks: Select created VPN interface
- Allow forward to destination zones - Unspecified
- Allow forward from source zones - lan
- Click Save & Save Apply
- Reboot router
Depending on your environment, additional configuration may be required, such as DNS configuration.
You can also verify the configuration over SSH by checking `/etc/config/network`. It should look like:
```
config interface 'algo'
option proto 'wireguard'
list addresses '10.0.0.2/32'
option private_key '......' # The private key generated by itself just now
config wireguard_wg0
option public_key '......' # Server's public key
option route_allowed_ips '1'
list allowed_ips '0.0.0.0/0'
option endpoint_host '......' # Server's public ip address
option endpoint_port '51820'
option persistent_keepalive '25'
```

@@ -6,18 +6,28 @@ Creating an Amazon AWS account requires giving Amazon a phone number that can re
 ### Select an EC2 plan
-The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the "AWS Free Tier." It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
+The cheapest EC2 plan you can choose is the "Free Plan" a.k.a. the ["AWS Free Tier"](https://aws.amazon.com/free/). It is only available to new AWS customers, it has limits on usage, and it converts to standard pricing after 12 months (the "introductory period"). After you exceed the usage limits, after the 12 month period, or if you are an existing AWS customer, then you will pay standard pay-as-you-go service prices.
 *Note*: Your Algo instance will not stop working when you hit the bandwidth limit, you will just start accumulating service charges on your AWS account.
 As of the time of this writing (July 2018), the Free Tier limits include "750 hours of Amazon EC2 Linux t2.micro instance usage" per month, 15 GB of bandwidth (outbound) per month, and 30 GB of cloud storage. Algo will not even use 1% of the storage limit, but you may have to monitor your bandwidth usage or keep an eye out for the email from Amazon when you are about to exceed the Free Tier limits.
+If you are not eligible for the free tier plan or have passed the 12 months of the introductory period, you can switch to [AWS Graviton](https://aws.amazon.com/ec2/graviton/) instances that are generally cheaper. To use the graviton instances, make the following changes in the ec2 section of your `config.cfg` file:
+* Set the `size` to `t4g.nano`
+* Set the `arch` to `arm64`
+> Currently, among all the instance sizes available on AWS, the t4g.nano instance is the least expensive option that does not require any promotional offers. However, AWS is currently running a promotion that provides a free trial of the `t4g.small` instance until December 31, 2023, which is available to all customers. For more information about this promotion, please refer to the [documentation](https://aws.amazon.com/ec2/faqs/#t4g-instances).
+Additional configurations are documented in the [EC2 section of the deploy from ansible guide](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#amazon-ec2)
 ### Create an AWS permissions policy
 In the AWS console, find the policies menu: click Services > IAM > Policies. Click Create Policy.
 Here, you have the policy editor. Switch to the JSON tab and copy-paste over the existing empty policy with [the minimum required AWS policy needed for Algo deployment](https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md#minimum-required-iam-permissions-for-deployment).
+When prompted to name the policy, name it `AlgoVPN_Provisioning`.
 ![Creating a new permissions policy in the AWS console.](/docs/images/aws-ec2-new-policy.png)
 ### Set up an AWS user
@@ -48,22 +58,27 @@ On the final screen, click the Download CSV button. This file includes the AWS a
 After you have downloaded Algo and installed its dependencies, the next step is running Algo to provision the VPN server on your AWS account.
-First you will be asked which server type to setup. You would want to enter "2" to use Amazon EC2.
+First you will be asked which server type to setup. You would want to enter "3" to use Amazon EC2.
 ```
 $ ./algo
 What provider would you like to use?
     1. DigitalOcean
-    2. Amazon EC2
-    3. Microsoft Azure
-    4. Google Compute Engine
-    5. Scaleway
-    6. OpenStack (DreamCompute optimised)
-    7. Install to existing Ubuntu 16.04 server (Advanced)
+    2. Amazon Lightsail
+    3. Amazon EC2
+    4. Microsoft Azure
+    5. Google Compute Engine
+    6. Hetzner Cloud
+    7. Vultr
+    8. Scaleway
+    9. OpenStack (DreamCompute optimised)
+    10. CloudStack (Exoscale optimised)
+    11. Linode
+    12. Install to existing Ubuntu server (for more advanced users)
 Enter the number of your desired provider
-: 2
+: 3
 ```
 Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key (Secret Access Key) that you received in the CSV file when you setup the account (don't worry if you don't see your text entered in the console; the key input is hidden here by Algo).
@@ -72,11 +87,11 @@ Next you will be asked for the AWS Access Key (Access Key ID) and AWS Secret Key
 Enter your aws_access_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
 Note: Make sure to use an IAM user with an acceptable policy attached (see https://github.com/trailofbits/algo/blob/master/docs/deploy-from-ansible.md).
 [pasted values will not be displayed]
 [AKIA...]:
 Enter your aws_secret_key (http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html)
 [pasted values will not be displayed]
 [ABCD...]:
 ```
 You will be prompted for the server name to enter. Feel free to leave this as the default ("algo") if you are not certain how this will affect your setup. Here we chose to call it "algovpn".
@@ -107,7 +122,7 @@ What region should the server be located in?
     14. us-east-2
     15. us-west-1
     16. us-west-2
 Enter the number of your desired region
 [13]
 :
@@ -116,4 +131,5 @@ Enter the number of your desired region
 You will then be asked the remainder of the standard Algo setup questions.
 ## Cleanup
 If you've installed Algo onto EC2 multiple times, your AWS account may become cluttered with unused or deleted resources e.g. instances, VPCs, subnets, etc. This may cause future installs to fail. The easiest way to clean up after you're done with a server is to go to "CloudFormation" from the console and delete the CloudFormation stack associated with that server. Please note that unless you've enabled termination protection on your instance, deleting the stack this way will delete your instance without warning, so be sure you are deleting the correct stack.
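If you prefer the command line, the same cleanup can be done with the AWS CLI; a sketch assuming the stack was created with the default server name `algo` (confirm the name first, since deleting the wrong stack removes the wrong instance):

```bash
# List CloudFormation stacks to confirm which one belongs to the server you are removing.
aws cloudformation describe-stacks --query "Stacks[].StackName"

# Delete the stack for that server; this also deletes the instance unless termination protection is enabled.
aws cloudformation delete-stack --stack-name algo
```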

@@ -1,20 +1,11 @@
 ### Configuration file
-You need to create a configuration file in INI format with your api key in `$HOME/.cloudstack.ini`
-```
-[cloudstack]
-endpoint = <endpoint>
-key = <your api key>
-secret = <your secret>
-timeout = 30
-```
-Example for Exoscale (European cloud provider exposing CloudStack API), visit https://portal.exoscale.com/u/<your@account>/account/profile/api to gather the required information:
-```
-[exoscale]
-endpoint = https://api.exoscale.com/compute
-key = <your api key>
-secret = <your secret>
-timeout = 30
+Algo scripts will ask you for the API details. You need to fetch the API credentials and the endpoint from the provider control panel.
+Example for Exoscale (European cloud provider exposing CloudStack API), visit https://portal.exoscale.com/u/<your@account>/account/profile/api to gather the required information: CloudStack api key and secret.
+```bash
+export CLOUDSTACK_KEY="<your api key>"
+export CLOUDSTACK_SECRET="<your secret>"
+export CLOUDSTACK_ENDPOINT="https://api.exoscale.com/compute"
 ```

@@ -18,6 +18,18 @@ You will be returned to the **Tokens/Keys** tab, and your new key will be shown
 Copy or note down the hash that shows below the name you entered, as this will be necessary for the steps below. This value will disappear if you leave this page, and you'll need to regenerate it if you forget it.
+## Select a Droplet (optional)
+The default option is the `s-1vcpu-1gb` because it is available in all regions. However, you may want to switch to a cheaper droplet such as `s-1vcpu-512mb-10gb` even though it is not available in all regions. This can be edited in the [Configuration File](config.cfg) under `cloud_providers > digitalocean > size`. See this brief comparison between the two droplets below:
+
+| Droplet Type | Monthly Cost | Bandwidth | Availability |
+|:--|:-:|:-:|:--|
+| `s-1vcpu-512mb-10gb` | $4/month | 0.5 TB | Limited |
+| `s-1vcpu-1gb` | $6/month | 1.0 TB | All regions |
+| ... | ... | ... | ... |
+
+*Note: Exceeding bandwidth limits costs $0.01/GiB at time of writing ([docs](https://docs.digitalocean.com/products/billing/bandwidth/#droplets)). See the live list of droplets [here](https://slugs.do-api.dev/).*
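The current size slugs can also be listed directly from the DigitalOcean API; a sketch assuming a personal access token is exported as `DO_TOKEN` and `jq` is installed (drop the `jq` filter to see full details, including prices):

```bash
# List available droplet size slugs from the DigitalOcean API.
curl -s -H "Authorization: Bearer $DO_TOKEN" \
  "https://api.digitalocean.com/v2/sizes?per_page=200" | jq -r '.sizes[].slug'
```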
 ## Using DigitalOcean with Algo (interactive)
 These steps are for those who run Algo using Docker or using the `./algo` command.

@@ -38,4 +38,4 @@ gcloud services enable compute.googleapis.com
 **Attention:** take care of the `configs/gce.json` file, which contains the credentials to manage your Google Cloud account, including create and delete servers on this project.
-There are more advanced arguments available for deploynment [using ansible](deploy-from-ansible.md).
+There are more advanced arguments available for deployment [using ansible](deploy-from-ansible.md).

@@ -1,3 +1,3 @@
 ## API Token
-Sign in into the [Hetzner Cloud Console](https://console.hetzner.cloud/) choose a project, go to `Access` → `Tokens`, and create a new token. Make sure to copy the token because it wont be shown to you again. A token is bound to a project, to interact with the API of another project you have to create a new token inside the project.
+Sign in to the [Hetzner Cloud Console](https://console.hetzner.cloud/), choose a project, go to `Security` → `API Tokens`, and `Generate API Token` with `Read & Write` access. Make sure to copy the token because it won't be shown to you again. A token is bound to a project. To interact with the API of another project you have to create a new token inside the project.

@@ -0,0 +1,9 @@
## API Token
Sign into the Linode Manager and go to the
[tokens management page](https://cloud.linode.com/profile/tokens).
Click `Add a Personal Access Token`. Label your new token and select *at least* the
`Linodes` read/write permission and `StackScripts` read/write permission.
Press `Submit` and make sure to copy the displayed token
as it won't be shown again.
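As noted in the Ansible deployment guide later in this diff, the token can also be supplied through the environment instead of at the prompt; a sketch:

```bash
# Export the token before running ./algo so it is picked up as LINODE_TOKEN.
export LINODE_TOKEN="<your personal access token>"
./algo
```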

@@ -1,9 +1,10 @@
 ### Configuration file
 Algo requires an API key from your Scaleway account to create a server.
-The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/account/credentials](https://console.scaleway.com/account/credentials), and then selecting "Generate new token" on the right side of the box labeled "API Tokens".
+The API key is generated by going to your Scaleway credentials at [https://console.scaleway.com/project/credentials](https://console.scaleway.com/project/credentials), and then selecting "Generate new API key" on the right side of the box labeled "API Keys".
+You'll be asked to specify a purpose for your API key before it is created. You will then be presented with an "Access key" and a "Secret key".
-Enter this token when Algo prompts you for the `auth token`.
+Enter the "Secret key" when Algo prompts you for the `auth token`. You won't need the "Access key".
 This information will be passed as the `algo_scaleway_token` variable when asked for in the Algo prompt.
 Your organization ID is also on this page: https://console.scaleway.com/account/credentials

@@ -51,23 +51,24 @@ Cloud roles:
 - role: cloud-openstack, [provider: openstack](#openstack)
 - role: cloud-cloudstack, [provider: cloudstack](#cloudstack)
 - role: cloud-hetzner, [provider: hetzner](#hetzner)
+- role: cloud-linode, [provider: linode](#linode)
 Server roles:
 - role: strongswan
-  * Installs [strongSwan](https://www.strongswan.org/)
-  * Enables AppArmor, limits CPU and memory access, and drops user privileges
-  * Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
-  * Bundles the appropriate certificates into Apple mobileconfig profiles for each user
+  - Installs [strongSwan](https://www.strongswan.org/)
+  - Enables AppArmor, limits CPU and memory access, and drops user privileges
+  - Builds a Certificate Authority (CA) with [easy-rsa-ipsec](https://github.com/ValdikSS/easy-rsa-ipsec) and creates one client certificate per user
+  - Bundles the appropriate certificates into Apple mobileconfig profiles for each user
 - role: dns_adblocking
-  * Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false`
-  * Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
+  - Installs DNS encryption through [dnscrypt-proxy](https://github.com/jedisct1/dnscrypt-proxy) with blacklists to be updated daily from `adblock_lists` in `config.cfg` - note this will occur even if `dns_encryption` in `config.cfg` is set to `false`
+  - Constrains dnscrypt-proxy with AppArmor and cgroups CPU and memory limitations
 - role: ssh_tunneling
-  * Adds a restricted `algo` group with no shell access and limited SSH forwarding options
-  * Creates one limited, local account and an SSH public key for each user
+  - Adds a restricted `algo` group with no shell access and limited SSH forwarding options
+  - Creates one limited, local account and an SSH public key for each user
 - role: wireguard
-  * Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
-  * Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
+  - Installs a [Wireguard](https://www.wireguard.com/) server, with a startup script, and automatic checks for upgrades
+  - Creates wireguard.conf files for Linux clients as well as QR codes for Apple/Android clients
 Note: The `strongswan` role generates Apple profiles with On-Demand Wifi and Cellular if you pass the following variables:
@@ -95,7 +96,7 @@ Required variables:
 - do_token
 - region
-Possible options can be gathered calling to https://api.digitalocean.com/v2/regions
+Possible options can be gathered calling to <https://api.digitalocean.com/v2/regions>
 ### Amazon EC2
@@ -109,9 +110,26 @@ Possible options can be gathered via cli `aws ec2 describe-regions`
 Additional variables:
-- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: false)
+- [encrypted](https://aws.amazon.com/blogs/aws/new-encrypted-ebs-boot-volumes/) - Encrypted EBS boot volume. Boolean (Default: true)
+- [size](https://aws.amazon.com/ec2/instance-types/) - EC2 instance type. String (Default: t2.micro)
+- [image](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-images.html) - AMI `describe-images` search parameters to find the OS for the hosted image. Each OS and architecture has a unique AMI-ID. The OS owner, for example [Ubuntu](https://cloud-images.ubuntu.com/locator/ec2/), updates these images often. If parameters below result in multiple results, the most recent AMI-ID is chosen
+
+  ```
+  # Example of equivalent cli command
+  aws ec2 describe-images --owners "099720109477" --filters "Name=architecture,Values=arm64" "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-jammy-22.04*"
+  ```
+
+  - [owners] - The operating system owner id. Default is [Canonical](https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29) (Default: 099720109477)
+  - [arch] - The architecture (Default: x86_64, Optional: arm64)
+  - [name] - The wildcard string to filter available ami names. Algo appends this name with the string "-\*64-server-\*", and prepends with "ubuntu/images/hvm-ssd/" (Default: Ubuntu latest LTS)
+- [instance_market_type](https://aws.amazon.com/ec2/pricing/) - Two pricing models are supported: on-demand and spot. String (Default: on-demand)
+  - If using spot instance types, one additional IAM permission along with the below minimum is required for deployment:
+
+  ```
+  "ec2:CreateLaunchTemplate"
+  ```
-#### Minimum required IAM permissions for deployment:
+#### Minimum required IAM permissions for deployment
 ```
 {
@@ -149,14 +167,18 @@ Additional variables:
             "Sid": "CloudFormationEC2Access",
             "Effect": "Allow",
             "Action": [
+                "ec2:DescribeRegions",
                 "ec2:CreateInternetGateway",
                 "ec2:DescribeVpcs",
                 "ec2:CreateVpc",
                 "ec2:DescribeInternetGateways",
                 "ec2:ModifyVpcAttribute",
-                "ec2:createTags",
+                "ec2:CreateTags",
                 "ec2:CreateSubnet",
-                "ec2:Associate*",
+                "ec2:AssociateVpcCidrBlock",
+                "ec2:AssociateSubnetCidrBlock",
+                "ec2:AssociateRouteTable",
+                "ec2:AssociateAddress",
                 "ec2:CreateRouteTable",
                 "ec2:AttachInternetGateway",
                 "ec2:DescribeRouteTables",
@@ -213,7 +235,7 @@ Required variables:
 Possible options can be gathered via cli `aws lightsail get-regions`
-#### Minimum required IAM permissions for deployment:
+#### Minimum required IAM permissions for deployment
 ```
 {
@@ -226,7 +248,27 @@ Possible options can be gathered via cli `aws lightsail get-regions`
                 "lightsail:GetRegions",
                 "lightsail:GetInstance",
                 "lightsail:CreateInstances",
-                "lightsail:OpenInstancePublicPorts"
+                "lightsail:DisableAddOn",
+                "lightsail:PutInstancePublicPorts",
+                "lightsail:StartInstance",
+                "lightsail:TagResource",
+                "lightsail:GetStaticIp",
+                "lightsail:AllocateStaticIp",
+                "lightsail:AttachStaticIp"
+            ],
+            "Resource": [
+                "*"
+            ]
+        },
+        {
+            "Sid": "DeployCloudFormationStack",
+            "Effect": "Allow",
+            "Action": [
+                "cloudformation:CreateStack",
+                "cloudformation:UpdateStack",
+                "cloudformation:DescribeStacks",
+                "cloudformation:DescribeStackEvents",
+                "cloudformation:ListStackResources"
             ],
             "Resource": [
                 "*"
@@ -264,6 +306,13 @@ Required variables:
 - hcloud_token: Your [API token](https://trailofbits.github.io/algo/cloud-hetzner.html#api-token) - can also be defined in the environment as HCLOUD_TOKEN
 - region: e.g. `nbg1`
+### Linode
+
+Required variables:
+
+- linode_token: Your [API token](https://trailofbits.github.io/algo/cloud-linode.html#api-token) - can also be defined in the environment as LINODE_TOKEN
+- region: e.g. `us-east`
 ### Update users
 Playbook:

@@ -1,4 +1,5 @@
 # Deploy from Google Cloud Shell
+**IMPORTANT NOTE: As of 2021-12-14 Algo requires Python 3.8, but Google Cloud Shell only provides Python 3.7.3. The instructions below will not work until Google updates Cloud Shell to have at least Python 3.8.**
 If you want to try Algo but don't wish to install the software on your own system you can use the **free** [Google Cloud Shell](https://cloud.google.com/shell/) to deploy a VPN to any supported cloud provider. Note that you cannot choose `Install to existing Ubuntu server` to turn Google Cloud Shell into your VPN server.

@ -13,28 +13,36 @@ While it is not possible to run your Algo server from within a Docker container,
2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`) 2. Create a local directory to hold your VPN configs (e.g. `C:\Users\trailofbits\Documents\VPNs\`)
3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`) 3. Create a local copy of [config.cfg](https://github.com/trailofbits/algo/blob/master/config.cfg), with required modifications (e.g. `C:\Users\trailofbits\Documents\VPNs\config.cfg`)
4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`): 4. Run the Docker container, mounting your configurations appropriately (assuming the container is named `trailofbits/algo` with a tag `latest`):
- From Windows:
- From Windows:
```powershell ```powershell
C:\Users\trailofbits> docker run --cap-drop=all -it \ C:\Users\trailofbits> docker run --cap-drop=all -it \
-v C:\Users\trailofbits\Documents\VPNs:/data \ -v C:\Users\trailofbits\Documents\VPNs:/data \
trailofbits/algo:latest ghcr.io/trailofbits/algo:latest
``` ```
- From Linux:
- From Linux:
```bash ```bash
$ docker run --cap-drop=all -it \ $ docker run --cap-drop=all -it \
-v /home/trailofbits/Documents/VPNs:/data \ -v /home/trailofbits/Documents/VPNs:/data \
trailofbits/algo:latest ghcr.io/trailofbits/algo:latest
``` ```
5. When it exits, you'll be left with a fully populated `configs` directory, containing all appropriate configuration data for your clients, and for future server management 5. When it exits, you'll be left with a fully populated `configs` directory, containing all appropriate configuration data for your clients, and for future server management
### Providing Additional Files ### Providing Additional Files
If you need to provide additional files -- like authorization files for Google Cloud Project -- you can simply specify an additional `-v` parameter, and provide the appropriate path when prompted by `algo`. If you need to provide additional files -- like authorization files for Google Cloud Project -- you can simply specify an additional `-v` parameter, and provide the appropriate path when prompted by `algo`.
For example, you can specify `-v C:\Users\trailofbits\Documents\VPNs\gce_auth.json:/algo/gce_auth.json`, making the local path to your credentials JSON file `/algo/gce_auth.json`. For example, you can specify `-v C:\Users\trailofbits\Documents\VPNs\gce_auth.json:/algo/gce_auth.json`, making the local path to your credentials JSON file `/algo/gce_auth.json`.
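Putting it together, a full run from Linux with the extra mount might look like the following (the credentials path is just an example; adjust it to wherever your file lives):
```bash
$ docker run --cap-drop=all -it \
    -v /home/trailofbits/Documents/VPNs:/data \
    -v /home/trailofbits/Documents/VPNs/gce_auth.json:/algo/gce_auth.json \
    ghcr.io/trailofbits/algo:latest
```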
### Scripted deployment ### Scripted deployment
Ansible variables (see [Deployment from Ansible](deploy-from-ansible.md)) can be passed via the `ALGO_ARGS` environment variable. Ansible variables (see [Deployment from Ansible](deploy-from-ansible.md)) can be passed via the `ALGO_ARGS` environment variable.
_The leading `-e` (or `--extra-vars`) is required_, e.g. _The leading `-e` (or `--extra-vars`) is required_, e.g.
```bash ```bash
$ ALGO_ARGS="-e $ ALGO_ARGS="-e
provider=digitalocean provider=digitalocean
@ -50,7 +58,7 @@ $ ALGO_ARGS="-e
$ docker run --cap-drop=all -it \ $ docker run --cap-drop=all -it \
-e "ALGO_ARGS=$ALGO_ARGS" \ -e "ALGO_ARGS=$ALGO_ARGS" \
-v /home/trailofbits/Documents/VPNs:/data \ -v /home/trailofbits/Documents/VPNs:/data \
trailofbits/algo:latest ghcr.io/trailofbits/algo:latest
``` ```
## Managing an Algo Server with Docker ## Managing an Algo Server with Docker
@ -58,11 +66,12 @@ $ docker run --cap-drop=all -it \
Even though the container itself is transient, because you've persisted the configuration data, you can use the same Docker image to manage your Algo server. This is done by setting the environment variable `ALGO_ARGS`. Even though the container itself is transient, because you've persisted the configuration data, you can use the same Docker image to manage your Algo server. This is done by setting the environment variable `ALGO_ARGS`.
If you want to use Algo to update the users on an existing server, specify `-e "ALGO_ARGS=update-users"` in your `docker run` command: If you want to use Algo to update the users on an existing server, specify `-e "ALGO_ARGS=update-users"` in your `docker run` command:
```powershell ```powershell
$ docker run --cap-drop=all -it \ $ docker run --cap-drop=all -it \
-e "ALGO_ARGS=update-users" \ -e "ALGO_ARGS=update-users" \
-v C:\Users\trailofbits\Documents\VPNs:/data \ -v C:\Users\trailofbits\Documents\VPNs:/data \
trailofbits/algo:latest ghcr.io/trailofbits/algo:latest
``` ```
## GNU Makefile for Docker ## GNU Makefile for Docker

@ -2,25 +2,29 @@
While you can't turn a macOS system into an AlgoVPN, you can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider. While you can't turn a macOS system into an AlgoVPN, you can install the Algo scripts on a macOS system and use them to deploy your AlgoVPN to a cloud provider.
Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS does not include a version of Python 3 that you can use with Algo. (It does include an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore.) Algo uses [Ansible](https://www.ansible.com) which requires Python 3. macOS includes an obsolete version of Python 2 installed as `/usr/bin/python` which you should ignore.
You'll need to install Python 3 before you can run Algo. Python 3 is available from several different packagers, three of which are listed below.
## macOS 10.15 Catalina ## macOS 10.15 Catalina
Catalina comes with `/usr/bin/python3` installed. This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Developer Command Line Tools the first time you run them. Having `git` installed can be useful but whether or not you choose to install the Command Line Tools you **cannot** use this version of Python 3 with Algo at this time. Instead install one of the versions below. Catalina comes with Python 3 installed as `/usr/bin/python3`. This file, and certain others like `/usr/bin/git`, start out as stub files that prompt you to install the Command Line Developer Tools package the first time you run them. This is the easiest way to install Python 3 on Catalina.
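If you haven't already triggered the stub prompt, you can start the Command Line Developer Tools installation directly from Terminal (a standard macOS command, included here only as a convenience):
```
xcode-select --install
```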
Note that Python 3 from Command Line Developer Tools prior to the release for Xcode 11.5 on 2020-05-20 might not work with Algo. If Software Update does not offer to update an older version of the tools you can download a newer version from [here](https://developer.apple.com/download/more/) (Apple ID login required).
## macOS prior to 10.15 Catalina
You'll need to install Python 3 before you can run Algo. Python 3 is available from different packagers, two of which are listed below.
## Ansible and SSL Validation ### Ansible and SSL Validation
Ansible validates SSL network connections using OpenSSL but macOS includes LibreSSL which behaves differently. Therefore each version of Python below includes or depends on its own copy of OpenSSL. Ansible validates SSL network connections using OpenSSL but macOS includes LibreSSL which behaves differently. Therefore each version of Python below includes or depends on its own copy of OpenSSL.
OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly, and that you're not inadvertently running Catalina's `/usr/bin/python3`. OpenSSL needs access to a list of trusted CA certificates in order to validate SSL connections. Each packager handles initializing this certificate store differently. If you see the error `CERTIFICATE_VERIFY_FAILED` when running Algo make sure you've followed the packager-specific instructions correctly.
## Install Python 3 ### Choose a packager and install Python 3
Choose one of the packagers below as your source for Python 3. Avoid installing versions from multiple packagers on the same Mac as you may encounter conflicts. In particular they might fight over creating symbolic links in `/usr/local/bin`. Choose one of the packagers below as your source for Python 3. Avoid installing versions from multiple packagers on the same Mac as you may encounter conflicts. In particular they might fight over creating symbolic links in `/usr/local/bin`.
### Option 1: Install using the Homebrew package manager #### Option 1: Install using the Homebrew package manager
If you're comfortable using the command line in Terminal the [Homebrew](https://brew.sh) project is a great source of software for macOS. If you're comfortable using the command line in Terminal the [Homebrew](https://brew.sh) project is a great source of software for macOS.
@ -28,22 +32,22 @@ First install Homebrew using the instructions on the [Homebrew](https://brew.sh)
The install command below takes care of initializing the CA certificate store. The install command below takes care of initializing the CA certificate store.
#### Installation ##### Installation
``` ```
brew install python3 brew install python3
``` ```
After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/usr/local/bin/python3`. After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/usr/local/bin/python3`.
#### Removal ##### Removal
``` ```
brew uninstall python3 brew uninstall python3
``` ```
### Option 2: Install a package from Python.org #### Option 2: Install the package from Python.org
If you don't want to install a package manager you can download a Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/). If you don't want to install a package manager you can download the Python package for macOS from [python.org](https://www.python.org/downloads/mac-osx/).
#### Installation ##### Installation
Download the most recent version of Python and install it like any other macOS package. Then initialize the CA certificate store from Finder by double-clicking on the file `Install Certificates.command` found in the `/Applications/Python 3.8` folder. Download the most recent version of Python and install it like any other macOS package. Then initialize the CA certificate store from Finder by double-clicking on the file `Install Certificates.command` found in the `/Applications/Python 3.8` folder.
@ -51,7 +55,7 @@ When you double-click on `Install Certificates.command` a new Terminal window wi
After installation open a new tab or window in Terminal and verify that the command `which python3` returns either `/usr/local/bin/python3` or `/Library/Frameworks/Python.framework/Versions/3.8/bin/python3`. After installation open a new tab or window in Terminal and verify that the command `which python3` returns either `/usr/local/bin/python3` or `/Library/Frameworks/Python.framework/Versions/3.8/bin/python3`.
#### Removal ##### Removal
Unfortunately the python.org package does not include an uninstaller and removing it requires several steps: Unfortunately the python.org package does not include an uninstaller and removing it requires several steps:
@ -60,24 +64,3 @@ Unfortunately the python.org package does not include an uninstaller and removin
3. In Terminal, undo the changes to your `PATH` by running: 3. In Terminal, undo the changes to your `PATH` by running:
```mv ~/.bash_profile.pysave ~/.bash_profile``` ```mv ~/.bash_profile.pysave ~/.bash_profile```
4. In Terminal, remove the dozen or so symbolic links the package created in `/usr/local/bin`. Or just leave them because installing another version of Python will overwrite most of them. 4. In Terminal, remove the dozen or so symbolic links the package created in `/usr/local/bin`. Or just leave them because installing another version of Python will overwrite most of them.
### Option 3: Install using the Macports package manager
[Macports](https://www.macports.org) is another command line based package manager like Homebrew. Most users will find Macports far more complex than Homebrew, but developers might find Macports more flexible. If you search for "Macports vs. Homebrew" you will find many opinions.
First install Macports per the [instructions](https://www.macports.org/install.php).
In addition to installing Python you'll need to install the package containing the CA certificates.
#### Installation
```
sudo port install python38
sudo port install curl-ca-bundle
```
After installation open a new tab or window in Terminal and verify that the command `which python3` returns `/opt/local/bin/python3`.
#### Removal
```
sudo port uninstall python38
sudo port uninstall curl-ca-bundle
```

@ -1,97 +0,0 @@
# RedHat/CentOS 6.x pre-installation requirements
Many people prefer RedHat or CentOS 6 (or similar variants like Amazon Linux) for their stability and lack of systemd. Unfortunately, there are a number of dated libraries, notably Python 2.6, that prevent Algo from running without errors. This script will prepare a RedHat, CentOS, or similar VM so it can deploy Algo to cloud instances.
## Step 1: Prep for RH/CentOS 6.8/Amazon
```shell
yum -y update
yum -y install epel-release
```
Enable any kernel updates:
```shell
reboot
```
## Step 2: Install Ansible and launch Algo
RedHat/CentOS 6.x uses Python 2.6 by default, which is explicitly deprecated and produces many warnings and errors, so we must install a safe, non-invasive 3.6 tool set which has to be expressly enabled (and will not survive login sessions and reboots):
- Install the Software Collections Library (to enable Python 3.6)
```shell
yum -y install centos-release-SCL
yum -y install \
openssl-devel \
libffi-devel \
automake \
gcc \
gcc-c++ \
kernel-devel \
rh-python36-python \
rh-python36-python-devel \
rh-python36-python-setuptools \
rh-python36-python-pip \
rh-python36-python-virtualenv \
rh-python36-python-crypto \
rh-python36-PyYAML \
libselinux-python \
python-crypto \
wget \
unzip \
nano
```
- Python 3.6 will not be used until it is explicitly enabled, per login session. Enable 3.6 as the default for this session (this needs to be re-run between logins and reboots)
```
scl enable rh-python36 bash
```
- We now default to Python 3.6. Upgrade the required components
```
python3 -m pip install -U pip virtualenv pycrypto setuptools
```
- Download and unzip Algo
```
wget https://github.com/trailofbits/algo/archive/master.zip
unzip master.zip
cd algo-master || echo "No Algo directory found"
```
- Set up a virtualenv and install the local Algo dependencies (must be run from algo-master)
```
python3 -m virtualenv --python="$(command -v python3)" .env
source .env/bin/activate
python3 -m pip install -U pip virtualenv
python3 -m pip install -r requirements.txt
```
- Edit the userlist and any other settings you desire
```
nano config.cfg
```
- Now you can run the Algo installer!
```
./algo
```
## Post-install macOS
1. Copy `./configs/*mobileconfig` to your local Mac
2. Install the VPN profile on your Mac (10.10+ required)
```shell
/usr/bin/profiles -I -F ./x.x.x.x_NAME.mobileconfig
```
3. To remove:
```shell
/usr/bin/profiles -D -F ./x.x.x.x_NAME.mobileconfig
```
The VPN connection will now appear under Networks (which can be pinned to the top menu bar if preferred)

@ -21,7 +21,7 @@ Wait a minute for Windows to install a few things in the background (it will eve
2. Click on 'Turn Windows features on or off' 2. Click on 'Turn Windows features on or off'
3. Scroll down and check 'Windows Subsystem for Linux', and then click OK. 3. Scroll down and check 'Windows Subsystem for Linux', and then click OK.
4. The subsystem will be installed, then Windows will require a restart. 4. The subsystem will be installed, then Windows will require a restart.
5. Restart Windows and then [install Ubuntu 18.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-1804-lts/9n9tngvndl3q) (at this time Ubuntu 20.04 LTS does not work with Algo when running under WSL). 5. Restart Windows and then install [Ubuntu 20.04 LTS from the Windows Store](https://www.microsoft.com/p/ubuntu-2004-lts/9n6svws3rx71).
6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows. 6. Run Ubuntu from the Start menu. It will take a few minutes to install. It will have you create a separate user account for the Linux subsystem. Once that's done, you will finally have Ubuntu running somewhat integrated with Windows.
## Install Algo ## Install Algo
@ -39,6 +39,32 @@ git clone https://github.com/trailofbits/algo
cd algo cd algo
``` ```
## Post installation steps
These steps are only needed if you clone the Algo repository to a host machine disk (C:, D:, etc.). WSL mounts host system disks under the `/mnt` directory.
### Allow git to change files metadata
By default git cannot change file metadata (for example, via chmod) for files stored on host machine disks (https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings). To allow it:
1. Start Ubuntu Terminal.
2. Edit /etc/wsl.conf (create it if it doesn't exist). Add the following:
```
[automount]
options = "metadata"
```
3. Close all Ubuntu Terminals.
4. Run PowerShell.
5. Run `wsl --shutdown` in PowerShell.
### Allow Ansible to run in a world-writable directory
Ansible treats host machine directories as world-writable and does not load `ansible.cfg` from them by default (https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir). To fix this, run the following inside the `algo` directory:
```shell
chmod 744 .
```
Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server! Now you can continue by following the [README](https://github.com/trailofbits/algo#deploy-the-algo-server) from the 4th step to deploy your Algo server!
You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run: You'll be instructed to edit the file `config.cfg` in order to specify the Algo user accounts to be created. If you're new to Linux the simplest editor to use is `nano`. To edit the file while in the `algo` directory, run:

@ -1,18 +1,25 @@
# Local Installation # Local Installation
**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.
------
## Outbound VPN Server
You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier. You can use Algo to configure a pre-existing server as an AlgoVPN rather than using it to create and configure a new server on a supported cloud provider. This is referred to as a **local** installation rather than a **cloud** deployment. If you're new to Algo or unfamiliar with Linux you'll find a cloud deployment to be easier.
To perform a local installation, install the Algo scripts following the normal installation instructions, then choose: To perform a local installation, install the Algo scripts following the normal installation instructions, then choose:
``` ```
Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users) Install to existing Ubuntu latest LTS server (for more advanced users)
``` ```
Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`). Make sure your target server is running an unmodified copy of the operating system version specified. The target can be the same system where you've installed the Algo scripts, or a remote system that you are able to access as root via SSH without needing to enter the SSH key passphrase (such as when using `ssh-agent`).
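If your target is a remote system, a common way to meet the passphrase-free requirement is to load your key into `ssh-agent` before running Algo. A generic sketch (the key path is an example; use your own):
```
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_ed25519   # example path; substitute your actual private key
```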
# Road Warrior setup ## Inbound VPN Server (also called "Road Warrior" setup)
Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so: Some may find it useful to set up an Algo server on an Ubuntu box on your home LAN, with the intention of being able to securely access your LAN and any resources on it when you're traveling elsewhere (the ["road warrior" setup](https://en.wikipedia.org/wiki/Road_warrior_(computing))). A few tips if you're doing so:
- Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router; - Make sure you forward any [relevant incoming ports](/docs/firewalls.md#external-firewall) to the Algo server from your router;
- Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`; - Change `BetweenClients_DROP` in `config.cfg` to `false`, and also consider changing `block_smb` and `block_netbios` to `false`;
- If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`). - If you want to use a DNS server on your LAN to resolve local domain names properly (e.g. a Pi-hole), set the `dns_encryption` flag in `config.cfg` to `false`, and change `dns_servers` to the local DNS server IP (i.e. `192.168.1.2`).
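Taken together, the tips above amount to a few edits in `config.cfg` before you deploy. A minimal sketch using `sed`, assuming the flags appear in the default `key: true` form (verify the key names in your copy of `config.cfg`, and edit `dns_servers` by hand since it is a list):
```
sed -i 's/^BetweenClients_DROP: true/BetweenClients_DROP: false/' config.cfg
sed -i 's/^block_smb: true/block_smb: false/' config.cfg
sed -i 's/^block_netbios: true/block_netbios: false/' config.cfg
sed -i 's/^dns_encryption: true/dns_encryption: false/' config.cfg
```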
**PLEASE NOTE**: Algo is intended for use to create a _dedicated_ VPN server. No uninstallation option is provided. If you install Algo on an existing server any existing services might break. In particular, the firewall rules will be overwritten. See [AlgoVPN and Firewalls](/docs/firewalls.md) for more information.

@ -2,7 +2,7 @@
Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support: Algo officially supports the [cloud providers listed here](https://github.com/trailofbits/algo/blob/master/README.md#deploy-the-algo-server). If you want to deploy Algo on another virtual hosting provider, that provider must support:
1. the base operating system image that Algo uses (Ubuntu 18.04 or 20.04), and 1. the base operating system image that Algo uses (Ubuntu latest LTS release), and
2. a minimum of certain kernel modules required for the strongSwan IPsec server. 2. a minimum of certain kernel modules required for the strongSwan IPsec server.
Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider. Please see the [Required Kernel Modules](https://wiki.strongswan.org/projects/strongswan/wiki/KernelModules) documentation from strongSwan for a list of the specific required modules and a script to check for them. As a first step, we recommend running their shell script to determine initial compatibility with your new hosting provider.

@ -17,7 +17,7 @@
## Has Algo been audited? ## Has Algo been audited?
No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://empireslacking.herokuapp.com). No. This project is under active development. We're happy to [accept and fix issues](https://github.com/trailofbits/algo/issues) as they are identified. Use Algo at your own risk. If you find a security issue of any severity, please [contact us on Slack](https://slack.empirehacking.nyc).
## What's the current status of WireGuard? ## What's the current status of WireGuard?

@ -22,6 +22,8 @@ First of all, check [this](https://github.com/trailofbits/algo#features) and ens
* [Error: Failed to create symlinks for deploying to localhost](#error-failed-to-create-symlinks-for-deploying-to-localhost) * [Error: Failed to create symlinks for deploying to localhost](#error-failed-to-create-symlinks-for-deploying-to-localhost)
* [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths) * [Wireguard: Unable to find 'configs/...' in expected paths](#wireguard-unable-to-find-configs-in-expected-paths)
* [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password) * [Ubuntu Error: "unable to write 'random state'" when generating CA password](#ubuntu-error-unable-to-write-random-state-when-generating-ca-password)
* [Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160](#old-networking-firewall-in-place)
* [Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "](#linode-error-unable-to-query-the-linode-api-saw-400-the-requested-distribution-is-not-supported-by-this-stackscript)
* [Connection Problems](#connection-problems) * [Connection Problems](#connection-problems)
* [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites) * [I'm blocked or get CAPTCHAs when I access certain websites](#im-blocked-or-get-captchas-when-i-access-certain-websites)
* [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device) * [I want to change the list of trusted Wifi networks on my Apple device](#i-want-to-change-the-list-of-trusted-wifi-networks-on-my-apple-device)
@ -41,7 +43,7 @@ Look here if you have a problem running the installer to set up a new Algo serve
### Python version is not supported ### Python version is not supported
The minimum Python version required to run Algo is 3.6. Most modern operating systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manually download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md) The minimum Python version required to run Algo is 3.8. Most modern operating systems should have it by default, but if the OS you are using doesn't meet the requirements, you have to upgrade. See the official documentation for your OS, or manually download it from https://www.python.org/downloads/. Otherwise, you may [deploy from docker](deploy-from-docker.md)
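A quick way to check which interpreter version you have (nothing Algo-specific, just standard Python):
```
python3 --version
```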
### Error: "You have not agreed to the Xcode license agreements" ### Error: "You have not agreed to the Xcode license agreements"
@ -362,6 +364,32 @@ sudo chown $USER:$USER $HOME/.rnd
Now, run Algo again. Now, run Algo again.
### Old Networking Firewall In Place
You may see the following output when attempting to run `./algo` from your localhost:
```
TASK [Wait until SSH becomes ready...] **********************************************************************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "elapsed": 321, "msg": "Timeout when waiting for search string OpenSSH in xxx.xxx.xxx.xxx:4160"}
included: /home/<username>/algo/algo/playbooks/rescue.yml for localhost
TASK [debug] ************************************************************************************************************************************************
ok: [localhost] => {
"fail_hint": [
"Sorry, but something went wrong!",
"Please check the troubleshooting guide.",
"https://trailofbits.github.io/algo/troubleshooting.html"
]
}
```
If you see this error, one possible explanation is that an old firewall configuration at your cloud hosting provider is still in place and needs to be updated or, ideally, removed. Removing it often fixes this issue.
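One generic way to confirm whether the port from the error message is actually reachable from your machine (not part of Algo itself):
```
# Replace xxx.xxx.xxx.xxx with your server's IP address
nc -zv -w 5 xxx.xxx.xxx.xxx 4160
```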
### Linode Error: "Unable to query the Linode API. Saw: 400: The requested distribution is not supported by this stackscript.; "
A StackScript is a custom deployment script that defines a set of configurations for a Linode instance (e.g. which distribution, specs, etc.). If you have deployed Algo with default values before, the StackScript created at that time is re-used on subsequent deployments (you can see it in the Linode dashboard under 'Create Linodes', in the 'StackScripts' tab). Your deployment may therefore fail with this 'unsupported stackscript' error because the pre-existing, older StackScript doesn't support a particular configuration setting or value. The quickest fix is to change the name of your deployment from the default value of 'algo' (or any other name you've used before; again, see the dashboard) and re-run the deployment.
## Connection Problems ## Connection Problems
Look here if you deployed an Algo server but now have a problem connecting to it with a client. Look here if you deployed an Algo server but now have a problem connecting to it with a client.
@ -502,4 +530,4 @@ If your router runs [pfSense](https://www.pfsense.org) and a single IPsec client
## I have a problem not covered here ## I have a problem not covered here
If you have an issue that you cannot solve with the guidance here, [join our Gitter](https://gitter.im/trailofbits/algo) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new). If you have an issue that you cannot solve with the guidance here, [create a new discussion](https://github.com/trailofbits/algo/discussions) and ask for help. If you think you found a new issue in Algo, [file an issue](https://github.com/trailofbits/algo/issues/new).

@ -1,6 +1,7 @@
#!/bin/bash #!/bin/sh
set -eux set -eux
# shellcheck disable=SC2230
which sudo || until \ which sudo || until \
apt-get update -y && \ apt-get update -y && \
apt-get install sudo -yf --install-suggests; do apt-get install sudo -yf --install-suggests; do
@ -15,9 +16,12 @@ cat <<EOF >/etc/ssh/sshd_config
{{ lookup('template', 'files/cloud-init/sshd_config') }} {{ lookup('template', 'files/cloud-init/sshd_config') }}
EOF EOF
test -d /home/algo/.ssh || (umask 077 && sudo -u algo mkdir -p /home/algo/.ssh/) test -d /home/algo/.ssh || sudo -u algo mkdir -m 0700 /home/algo/.ssh
echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (umask 177 && sudo -u algo tee /home/algo/.ssh/authorized_keys) echo "{{ lookup('file', '{{ SSH_keys.public }}') }}" | (sudo -u algo tee /home/algo/.ssh/authorized_keys && chmod 0600 /home/algo/.ssh/authorized_keys)
ufw --force reset
# shellcheck disable=SC2015
dpkg -l sshguard && until apt-get remove -y --purge sshguard; do dpkg -l sshguard && until apt-get remove -y --purge sshguard; do
sleep 3 sleep 3
done || true done || true

@ -25,5 +25,6 @@ write_files:
runcmd: runcmd:
- set -x - set -x
- ufw --force reset
- sudo apt-get remove -y --purge sshguard || true - sudo apt-get remove -y --purge sshguard || true
- systemctl restart sshd.service - systemctl restart sshd.service

@ -18,126 +18,126 @@
- { name: Google Compute Engine, alias: gce } - { name: Google Compute Engine, alias: gce }
- { name: Hetzner Cloud, alias: hetzner } - { name: Hetzner Cloud, alias: hetzner }
- { name: Vultr, alias: vultr } - { name: Vultr, alias: vultr }
- { name: Scaleway, alias: scaleway} - { name: Scaleway, alias: scaleway }
- { name: OpenStack (DreamCompute optimised), alias: openstack } - { name: OpenStack (DreamCompute optimised), alias: openstack }
- { name: CloudStack (Exoscale optimised), alias: cloudstack } - { name: CloudStack (Exoscale optimised), alias: cloudstack }
- { name: "Install to existing Ubuntu 18.04 or 20.04 server (for more advanced users)", alias: local } - { name: Linode, alias: linode }
- { name: Install to existing Ubuntu latest LTS server (for more advanced users), alias: local }
vars_files: vars_files:
- config.cfg - config.cfg
tasks: tasks:
- block: - block:
- name: Cloud prompt - name: Cloud prompt
pause: pause:
prompt: | prompt: |
What provider would you like to use? What provider would you like to use?
{% for p in providers_map %} {% for p in providers_map %}
{{ loop.index }}. {{ p['name'] }} {{ loop.index }}. {{ p['name'] }}
{% endfor %} {% endfor %}
Enter the number of your desired provider Enter the number of your desired provider
register: _algo_provider register: _algo_provider
when: provider is undefined when: provider is undefined
- name: Set facts based on the input - name: Set facts based on the input
set_fact: set_fact:
algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}" algo_provider: "{{ provider | default(providers_map[_algo_provider.user_input|default(omit)|int - 1]['alias']) }}"
- name: VPN server name prompt - name: VPN server name prompt
pause: pause:
prompt: | prompt: |
Name the vpn server Name the vpn server
[algo] [algo]
register: _algo_server_name register: _algo_server_name
when: when:
- server_name is undefined - server_name is undefined
- algo_provider != "local" - algo_provider != "local"
- name: Cellular On Demand prompt - name: Cellular On Demand prompt
pause: pause:
prompt: | prompt: |
Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks? Do you want macOS/iOS clients to enable "Connect On Demand" when connected to cellular networks?
[y/N] [y/N]
register: _ondemand_cellular register: _ondemand_cellular
when: ondemand_cellular is undefined when: ondemand_cellular is undefined
- name: Wi-Fi On Demand prompt - name: Wi-Fi On Demand prompt
pause: pause:
prompt: | prompt: |
Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi? Do you want macOS/iOS clients to enable "Connect On Demand" when connected to Wi-Fi?
[y/N] [y/N]
register: _ondemand_wifi register: _ondemand_wifi
when: ondemand_wifi is undefined when: ondemand_wifi is undefined
- name: Trusted Wi-Fi networks prompt - name: Trusted Wi-Fi networks prompt
pause: pause:
prompt: | prompt: |
List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand" List the names of any trusted Wi-Fi networks where macOS/iOS clients should not use "Connect On Demand"
(e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi) (e.g., your home network. Comma-separated value, e.g., HomeNet,OfficeWifi,AlgoWiFi)
register: _ondemand_wifi_exclude register: _ondemand_wifi_exclude
when: when:
- ondemand_wifi_exclude is undefined - ondemand_wifi_exclude is undefined
- (ondemand_wifi|default(false)|bool) or - (ondemand_wifi|default(false)|bool) or (booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
(booleans_map[_ondemand_wifi.user_input|default(omit)]|default(false))
- name: Retain the PKI prompt - name: Retain the PKI prompt
pause: pause:
prompt: | prompt: |
Do you want to retain the keys (PKI)? (required to add users in the future, but less secure) Do you want to retain the keys (PKI)? (required to add users in the future, but less secure)
[y/N] [y/N]
register: _store_pki register: _store_pki
when: when:
- store_pki is undefined - store_pki is undefined
- ipsec_enabled - ipsec_enabled
- name: DNS adblocking prompt - name: DNS adblocking prompt
pause: pause:
prompt: | prompt: |
Do you want to enable DNS ad blocking on this VPN server? Do you want to enable DNS ad blocking on this VPN server?
[y/N] [y/N]
register: _dns_adblocking register: _dns_adblocking
when: dns_adblocking is undefined when: dns_adblocking is undefined
- name: SSH tunneling prompt - name: SSH tunneling prompt
pause: pause:
prompt: | prompt: |
Do you want each user to have their own account for SSH tunneling? Do you want each user to have their own account for SSH tunneling?
[y/N] [y/N]
register: _ssh_tunneling register: _ssh_tunneling
when: ssh_tunneling is undefined when: ssh_tunneling is undefined
- name: Set facts based on the input - name: Set facts based on the input
set_fact: set_fact:
algo_server_name: >- algo_server_name: >-
{% if server_name is defined %}{% set _server = server_name %} {% if server_name is defined %}{% set _server = server_name %}
{%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%} {%- elif _algo_server_name.user_input is defined and _algo_server_name.user_input|length > 0 -%}
{%- set _server = _algo_server_name.user_input -%} {%- set _server = _algo_server_name.user_input -%}
{%- else %}{% set _server = defaults['server_name'] %}{% endif -%} {%- else %}{% set _server = defaults['server_name'] %}{% endif -%}
{{ _server | regex_replace('(?!\.)(\W|_)', '-') }} {{ _server | regex_replace('(?!\.)(\W|_)', '-') }}
algo_ondemand_cellular: >- algo_ondemand_cellular: >-
{% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }} {% if ondemand_cellular is defined %}{{ ondemand_cellular | bool }}
{%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }} {%- elif _ondemand_cellular.user_input is defined %}{{ booleans_map[_ondemand_cellular.user_input] | default(defaults['ondemand_cellular']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ondemand_wifi: >- algo_ondemand_wifi: >-
{% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }} {% if ondemand_wifi is defined %}{{ ondemand_wifi | bool }}
{%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }} {%- elif _ondemand_wifi.user_input is defined %}{{ booleans_map[_ondemand_wifi.user_input] | default(defaults['ondemand_wifi']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ondemand_wifi_exclude: >- algo_ondemand_wifi_exclude: >-
{% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }} {% if ondemand_wifi_exclude is defined %}{{ ondemand_wifi_exclude | b64encode }}
{%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%} {%- elif _ondemand_wifi_exclude.user_input is defined and _ondemand_wifi_exclude.user_input|length > 0 -%}
{{ _ondemand_wifi_exclude.user_input | b64encode }} {{ _ondemand_wifi_exclude.user_input | b64encode }}
{%- else %}{{ '_null' | b64encode }}{% endif %} {%- else %}{{ '_null' | b64encode }}{% endif %}
algo_dns_adblocking: >- algo_dns_adblocking: >-
{% if dns_adblocking is defined %}{{ dns_adblocking | bool }} {% if dns_adblocking is defined %}{{ dns_adblocking | bool }}
{%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }} {%- elif _dns_adblocking.user_input is defined %}{{ booleans_map[_dns_adblocking.user_input] | default(defaults['dns_adblocking']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_ssh_tunneling: >- algo_ssh_tunneling: >-
{% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }} {% if ssh_tunneling is defined %}{{ ssh_tunneling | bool }}
{%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }} {%- elif _ssh_tunneling.user_input is defined %}{{ booleans_map[_ssh_tunneling.user_input] | default(defaults['ssh_tunneling']) }}
{%- else %}false{% endif %} {%- else %}false{% endif %}
algo_store_pki: >- algo_store_pki: >-
{% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }} {% if ipsec_enabled %}{%- if store_pki is defined %}{{ store_pki | bool }}
{%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }} {%- elif _store_pki.user_input is defined %}{{ booleans_map[_store_pki.user_input] | default(defaults['store_pki']) }}
{%- else %}false{% endif %}{% endif %} {%- else %}false{% endif %}{% endif %}
rescue: rescue:
- include_tasks: playbooks/rescue.yml - include_tasks: playbooks/rescue.yml

@ -22,16 +22,7 @@ installRequirements() {
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
apt-get update apt-get update
apt-get install \ apt-get install \
software-properties-common \
git \
build-essential \
libssl-dev \
libffi-dev \
python3-dev \
python3-pip \
python3-setuptools \
python3-virtualenv \ python3-virtualenv \
bind9-host \
jq -y jq -y
} }
@ -39,9 +30,9 @@ getAlgo() {
[ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo [ ! -d "algo" ] && git clone "https://github.com/${REPO_SLUG}" -b "${REPO_BRANCH}" algo
cd algo cd algo
python3 -m virtualenv --python="$(command -v python3)" .venv python3 -m virtualenv --python="$(command -v python3)" .env
# shellcheck source=/dev/null # shellcheck source=/dev/null
. .venv/bin/activate . .env/bin/activate
python3 -m pip install -U pip virtualenv python3 -m pip install -U pip virtualenv
python3 -m pip install -r requirements.txt python3 -m pip install -r requirements.txt
} }
@ -50,7 +41,7 @@ publicIpFromInterface() {
echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint." echo "Couldn't find a valid ipv4 address, using the first IP found on the interfaces as the endpoint."
DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')" DEFAULT_INTERFACE="$(ip -4 route list match default | grep -Eo "dev .*" | awk '{print $2}')"
ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b') ENDPOINT=$(ip -4 addr sh dev "$DEFAULT_INTERFACE" | grep -w inet | head -n1 | awk '{print $2}' | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b')
export ENDPOINT=$ENDPOINT export ENDPOINT="${ENDPOINT}"
echo "Using ${ENDPOINT} as the endpoint" echo "Using ${ENDPOINT} as the endpoint"
} }
@ -66,7 +57,7 @@ publicIpFromMetadata() {
fi fi
if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then if echo "${ENDPOINT}" | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b"; then
export ENDPOINT=$ENDPOINT export ENDPOINT="${ENDPOINT}"
echo "Using ${ENDPOINT} as the endpoint" echo "Using ${ENDPOINT} as the endpoint"
else else
publicIpFromInterface publicIpFromInterface
@ -78,7 +69,7 @@ deployAlgo() {
cd /opt/algo cd /opt/algo
# shellcheck source=/dev/null # shellcheck source=/dev/null
. .venv/bin/activate . .env/bin/activate
export HOME=/root export HOME=/root
export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp export ANSIBLE_LOCAL_TEMP=/root/.ansible/tmp

@ -1,110 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
DOCUMENTATION = '''
---
module: cloudstack_zones
short_description: List zones on Apache CloudStack based clouds.
description:
- List zones.
version_added: '0.1'
author: Julien Bachmann (@0xmilkmix)
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: List zones
cloudstack_zones:
register: _cs_zones
'''
RETURN = '''
---
zone:
description: List of zones.
returned: success
type: list
sample:
[
{
"allocationstate": "Enabled",
"dhcpprovider": "VirtualRouter",
"id": "<id>",
"localstorageenabled": true,
"name": "ch-gva-2",
"networktype": "Basic",
"securitygroupsenabled": true,
"tags": [],
"zonetoken": "token"
},
{
"allocationstate": "Enabled",
"dhcpprovider": "VirtualRouter",
"id": "<id>",
"localstorageenabled": true,
"name": "ch-dk-2",
"networktype": "Basic",
"securitygroupsenabled": true,
"tags": [],
"zonetoken": "token"
},
{
"allocationstate": "Enabled",
"dhcpprovider": "VirtualRouter",
"id": "<id>",
"localstorageenabled": true,
"name": "at-vie-1",
"networktype": "Basic",
"securitygroupsenabled": true,
"tags": [],
"zonetoken": "token"
},
{
"allocationstate": "Enabled",
"dhcpprovider": "VirtualRouter",
"id": "<id>",
"localstorageenabled": true,
"name": "de-fra-1",
"networktype": "Basic",
"securitygroupsenabled": true,
"tags": [],
"zonetoken": "token"
}
]
'''
class AnsibleCloudStackZones(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackZones, self).__init__(module)
self.zones = None
def get_zones(self):
args = {}
if not self.zones:
zones = self.query_api('listZones', **args)
if zones:
self.zones = zones
return self.zones
def main():
module = AnsibleModule(argument_spec={})
acs_zones = AnsibleCloudStackZones(module)
result = acs_zones.get_zones()
module.exit_json(**result)
if __name__ == '__main__':
main()

@ -1,551 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lightsail
short_description: Create or delete a virtual machine instance in AWS Lightsail
description:
  - Creates or deletes instances in AWS Lightsail and optionally waits for the instance to be 'running'.
version_added: "2.4"
author: "Nick Ball (@nickball)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
name:
description:
- Name of the instance
required: true
default : null
zone:
description:
- AWS availability zone in which to launch the instance. Required when state='present'
required: false
default: null
blueprint_id:
description:
- ID of the instance blueprint image. Required when state='present'
required: false
default: null
bundle_id:
description:
- Bundle of specification info for the instance. Required when state='present'
required: false
default: null
user_data:
description:
- Launch script that can configure the instance with additional data
required: false
default: null
key_pair_name:
description:
- Name of the key pair to use with the instance
required: false
default: null
wait:
description:
- Wait for the instance to be in state 'running' before returning. If wait is "no" an ip_address may not be returned
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
open_ports:
description:
- Adds public ports to an Amazon Lightsail instance.
default: null
suboptions:
from_port:
description: Begin of the range
required: true
default: null
to_port:
description: End of the range
required: true
default: null
protocol:
description: Accepted traffic protocol.
required: true
choices:
- udp
- tcp
- all
default: null
requirements:
- "python >= 2.6"
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a new Lightsail instance, register the instance details
- lightsail:
state: present
name: myinstance
region: us-east-1
zone: us-east-1a
blueprint_id: ubuntu_16_04
bundle_id: nano_1_0
key_pair_name: id_rsa
user_data: " echo 'hello world' > /home/ubuntu/test.txt"
wait_timeout: 500
open_ports:
- from_port: 4500
to_port: 4500
protocol: udp
- from_port: 500
to_port: 500
protocol: udp
register: my_instance
- debug:
msg: "Name is {{ my_instance.instance.name }}"
- debug:
msg: "IP is {{ my_instance.instance.publicIpAddress }}"
# Delete an instance if present
- lightsail:
state: absent
region: us-east-1
name: myinstance
'''
RETURN = '''
changed:
description: if a snapshot has been modified/created
returned: always
type: bool
sample:
changed: true
instance:
description: instance data
returned: always
type: dict
sample:
arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
blueprint_id: "ubuntu_16_04"
blueprint_name: "Ubuntu"
bundle_id: "nano_1_0"
created_at: "2017-03-27T08:38:59.714000-04:00"
hardware:
cpu_count: 1
ram_size_in_gb: 0.5
is_static_ip: false
location:
availability_zone: "us-east-1a"
region_name: "us-east-1"
name: "my_instance"
networking:
monthly_transfer:
gb_per_month_allocated: 1024
ports:
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 80
protocol: tcp
to_port: 80
- access_direction: "inbound"
access_from: "Anywhere (0.0.0.0/0)"
access_type: "public"
common_name: ""
from_port: 22
protocol: tcp
to_port: 22
private_ip_address: "172.26.8.14"
public_ip_address: "34.207.152.202"
resource_type: "Instance"
ssh_key_name: "keypair"
state:
code: 16
name: running
support_code: "588307843083/i-0997c97831ee21e33"
username: "ubuntu"
'''
import time
import traceback
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
try:
import boto3
except ImportError:
# will be caught by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
HAS_BOTO3, camel_dict_to_snake_dict)
def create_instance(module, client, instance_name):
"""
Create an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the new instance.
"""
changed = False
# Check if instance already exists
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
zone = module.params.get('zone')
blueprint_id = module.params.get('blueprint_id')
bundle_id = module.params.get('bundle_id')
user_data = module.params.get('user_data')
user_data = '' if user_data is None else user_data
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
if module.params.get('key_pair_name'):
key_pair_name = module.params.get('key_pair_name')
else:
key_pair_name = ''
if module.params.get('open_ports'):
open_ports = module.params.get('open_ports')
else:
open_ports = '[]'
resp = None
if inst is None:
try:
resp = client.create_instances(
instanceNames=[
instance_name
],
availabilityZone=zone,
blueprintId=blueprint_id,
bundleId=bundle_id,
userData=user_data,
keyPairName=key_pair_name,
)
resp = resp['operations'][0]
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to create instance {0}, error: {1}'.format(instance_name, e))
inst = _find_instance_info(client, instance_name)
# Wait for instance to become running
if wait:
while (wait_max > time.time()) and (inst is not None and inst['state']['name'] != "running"):
try:
time.sleep(2)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="Wait for instance start timeout at %s" % time.asctime())
# Attempt to open ports
if open_ports:
if inst is not None:
try:
for o in open_ports:
resp = client.open_instance_public_ports(
instanceName=instance_name,
portInfo={
'fromPort': o['from_port'],
'toPort': o['to_port'],
'protocol': o['protocol']
}
)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error opening ports for instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def delete_instance(module, client, instance_name):
"""
Terminates an instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to delete
Returns a dictionary of instance information
about the instance deleted (pre-deletion).
If the instance to be deleted is running
"changed" will be set to False.
"""
# It looks like deleting removes the instance immediately, nothing to wait for
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before deleting
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to delete instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to delete instance {0}.".format(instance_name), exception=traceback.format_exc())
# sleep and retry
time.sleep(10)
# Attempt to delete
if inst is not None:
while not changed and ((wait and wait_max > time.time()) or (not wait)):
try:
client.delete_instance(instanceName=instance_name)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Error deleting instance {0}, error: {1}'.format(instance_name, e))
# Timed out
if wait and not changed and wait_max <= time.time():
module.fail_json(msg="wait for instance delete timeout at %s" % time.asctime())
return (changed, inst)
def restart_instance(module, client, instance_name):
"""
Reboot an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to reboot
Returns a dictionary of instance information
about the restarted instance
If the instance was not able to reboot,
"changed" will be set to False.
Wait will not apply here as this is an OS-level operation
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == "403":
module.fail_json(msg="Failed to restart instance {0}. Check that you have permissions to perform the operation.".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to restart instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(3)
# send reboot
if inst is not None:
try:
client.reboot_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Unable to reboot instance {0}, error: {1}'.format(instance_name, e))
changed = True
return (changed, inst)
def startstop_instance(module, client, instance_name, state):
"""
Starts or stops an existing instance
module: Ansible module object
client: authenticated lightsail connection object
instance_name: name of instance to start/stop
state: Target state ("running" or "stopped")
Returns a dictionary of instance information
about the instance started/stopped
If the instance was not able to change state,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
wait_max = time.time() + wait_timeout
changed = False
inst = None
try:
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'NotFoundException':
module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))
# Wait for instance to exit transition state before state change
if wait:
while wait_max > time.time() and inst is not None and inst['state']['name'] in ('pending', 'stopping'):
try:
time.sleep(5)
inst = _find_instance_info(client, instance_name)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 403:  # HTTPStatusCode is an int
module.fail_json(msg="Failed to start/stop instance {0}. Check that you have permissions to perform the operation".format(instance_name),
exception=traceback.format_exc())
elif e.response['Error']['Code'] == "RequestExpired":
module.fail_json(msg="RequestExpired: Failed to start/stop instance {0}.".format(instance_name), exception=traceback.format_exc())
time.sleep(1)
# Try state change
if inst is not None and inst['state']['name'] != state:
try:
if state == 'running':
client.start_instance(instanceName=instance_name)
else:
client.stop_instance(instanceName=instance_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(instance_name, e))
changed = True
# Grab current instance info
inst = _find_instance_info(client, instance_name)
return (changed, inst)
def core(module):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
client = None
try:
client = boto3_conn(module, conn_type='client', resource='lightsail',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e, exception=traceback.format_exc())
changed = False
state = module.params['state']
name = module.params['name']
if state == 'absent':
changed, instance_dict = delete_instance(module, client, name)
elif state in ('running', 'stopped'):
changed, instance_dict = startstop_instance(module, client, name, state)
elif state == 'restarted':
changed, instance_dict = restart_instance(module, client, name)
elif state == 'present':
changed, instance_dict = create_instance(module, client, name)
module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(instance_dict))
def _find_instance_info(client, instance_name):
''' Look up a Lightsail instance; ClientError exceptions are left for the callers of this function to handle '''
inst = None
try:
inst = client.get_instance(instanceName=instance_name)
except botocore.exceptions.ClientError:
raise
return inst['instance']
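# Illustrative caller pattern (it mirrors delete_instance/restart_instance/startstop_instance above):
# treat NotFoundException as "instance does not exist" and fail on anything else, e.g.
#
#   try:
#       inst = _find_instance_info(client, instance_name)
#   except botocore.exceptions.ClientError as e:
#       if e.response['Error']['Code'] != 'NotFoundException':
#           module.fail_json(msg='Error finding instance {0}, error: {1}'.format(instance_name, e))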
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted']),
zone=dict(type='str'),
blueprint_id=dict(type='str'),
bundle_id=dict(type='str'),
key_pair_name=dict(type='str'),
user_data=dict(type='str'),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=300),
open_ports=dict(type='list')
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='Python module "boto3" is missing, please install it')
if not HAS_BOTOCORE:
module.fail_json(msg='Python module "botocore" is missing, please install it')
try:
core(module)
except Exception as e:  # botocore's ClientError is already an Exception subclass
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
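
For context, a minimal sketch of invoking this module from a play, assuming it is exposed to the playbook as lightsail. The region, zone, blueprint and bundle values are illustrative only (the Lightsail CloudFormation template later in this diff uses the same ubuntu_20_04/nano_2_0 identifiers as defaults):

  - name: Create an Algo Lightsail instance (illustrative)
    lightsail:
      state: present
      name: algo-lightsail
      region: us-east-1          # assumed region
      zone: us-east-1a           # assumed availability zone
      blueprint_id: ubuntu_20_04
      bundle_id: nano_2_0
      wait: true
      wait_timeout: 300
    register: lightsail_instance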

@ -0,0 +1,113 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.linode import get_user_agent
LINODE_IMP_ERR = None
try:
from linode_api4 import StackScript, LinodeClient
HAS_LINODE_DEPENDENCY = True
except ImportError:
LINODE_IMP_ERR = traceback.format_exc()
HAS_LINODE_DEPENDENCY = False
def create_stackscript(module, client, **kwargs):
"""Creates a stackscript and handles return format."""
try:
response = client.linode.stackscript_create(**kwargs)
return response._raw_json
except Exception as exception:
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def stackscript_available(module, client):
"""Try to retrieve a stackscript."""
try:
label = module.params['label']
desc = module.params['description']
result = client.linode.stackscripts(StackScript.label == label,
StackScript.description == desc,
mine_only=True
)
return result[0]
except IndexError:
return None
except Exception as exception:
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
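# Note: stackscript_available() returns a linode_api4 StackScript object (its raw API
# payload is exposed via ._raw_json, which main() below returns) when a script with the
# same label and description already exists for this account, or None otherwise.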
def initialise_module():
"""Initialise the module parameter specification."""
return AnsibleModule(
argument_spec=dict(
label=dict(type='str', required=True),
state=dict(
type='str',
required=True,
choices=['present', 'absent']
),
access_token=dict(
type='str',
required=True,
no_log=True,
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
),
script=dict(type='str', required=True),
images=dict(type='list', required=True),
description=dict(type='str', required=False),
public=dict(type='bool', required=False, default=False),
),
supports_check_mode=False
)
def build_client(module):
"""Build a LinodeClient."""
return LinodeClient(
module.params['access_token'],
user_agent=get_user_agent('linode_v4_module')
)
def main():
"""Module entrypoint."""
module = initialise_module()
if not HAS_LINODE_DEPENDENCY:
module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
client = build_client(module)
stackscript = stackscript_available(module, client)
if module.params['state'] == 'present' and stackscript is not None:
module.exit_json(changed=False, stackscript=stackscript._raw_json)
elif module.params['state'] == 'present' and stackscript is None:
stackscript_json = create_stackscript(
module, client,
label=module.params['label'],
script=module.params['script'],
images=module.params['images'],
desc=module.params['description'],
public=module.params['public'],
)
module.exit_json(changed=True, stackscript=stackscript_json)
elif module.params['state'] == 'absent' and stackscript is not None:
stackscript.delete()
module.exit_json(changed=True, stackscript=stackscript._raw_json)
elif module.params['state'] == 'absent' and stackscript is None:
module.exit_json(changed=False, stackscript={})
if __name__ == "__main__":
main()
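
A minimal sketch of a task using this StackScript module, assuming it is exposed to the playbook as linode_stackscript_v4; the module name, image slug and script body are assumptions, while the parameter names match initialise_module() above:

  - name: Ensure the StackScript exists (illustrative)
    linode_stackscript_v4:
      label: algo-stackscript
      state: present
      access_token: "{{ lookup('env', 'LINODE_ACCESS_TOKEN') }}"
      description: Algo bootstrap script
      images:
        - linode/ubuntu22.04     # assumed image slug
      script: |
        #!/bin/bash
        echo bootstrap
    register: _stackscript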

@ -0,0 +1,142 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.linode import get_user_agent
LINODE_IMP_ERR = None
try:
from linode_api4 import Instance, LinodeClient
HAS_LINODE_DEPENDENCY = True
except ImportError:
LINODE_IMP_ERR = traceback.format_exc()
HAS_LINODE_DEPENDENCY = False
def create_linode(module, client, **kwargs):
"""Creates a Linode instance and handles return format."""
if kwargs['root_pass'] is None:
kwargs.pop('root_pass')
try:
response = client.linode.instance_create(**kwargs)
except Exception as exception:
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
try:
if isinstance(response, tuple):
instance, root_pass = response
instance_json = instance._raw_json
instance_json.update({'root_pass': root_pass})
return instance_json
else:
return response._raw_json
except TypeError:
module.fail_json(msg='Unable to parse Linode instance creation'
' response. Please raise a bug against this'
' module on https://github.com/ansible/ansible/issues'
)
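# As handled above: instance_create() may hand back either a bare Instance (when
# root_pass was supplied) or an (Instance, generated_root_pass) tuple (when the
# library generated the password); the tuple branch folds the generated password
# into the returned JSON so the caller can surface it.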
def maybe_instance_from_label(module, client):
"""Try to retrieve an instance based on a label."""
try:
label = module.params['label']
result = client.linode.instances(Instance.label == label)
return result[0]
except IndexError:
return None
except Exception as exception:
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def initialise_module():
"""Initialise the module parameter specification."""
return AnsibleModule(
argument_spec=dict(
label=dict(type='str', required=True),
state=dict(
type='str',
required=True,
choices=['present', 'absent']
),
access_token=dict(
type='str',
required=True,
no_log=True,
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
),
authorized_keys=dict(type='list', required=False),
group=dict(type='str', required=False),
image=dict(type='str', required=False),
region=dict(type='str', required=False),
root_pass=dict(type='str', required=False, no_log=True),
tags=dict(type='list', required=False),
type=dict(type='str', required=False),
stackscript_id=dict(type='int', required=False),
),
supports_check_mode=False,
required_one_of=(
['state', 'label'],
),
required_together=(
['region', 'image', 'type'],
)
)
def build_client(module):
"""Build a LinodeClient."""
return LinodeClient(
module.params['access_token'],
user_agent=get_user_agent('linode_v4_module')
)
def main():
"""Module entrypoint."""
module = initialise_module()
if not HAS_LINODE_DEPENDENCY:
module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
client = build_client(module)
instance = maybe_instance_from_label(module, client)
if module.params['state'] == 'present' and instance is not None:
module.exit_json(changed=False, instance=instance._raw_json)
elif module.params['state'] == 'present' and instance is None:
instance_json = create_linode(
module, client,
authorized_keys=module.params['authorized_keys'],
group=module.params['group'],
image=module.params['image'],
label=module.params['label'],
region=module.params['region'],
root_pass=module.params['root_pass'],
tags=module.params['tags'],
ltype=module.params['type'],
stackscript_id=module.params['stackscript_id'],
)
module.exit_json(changed=True, instance=instance_json)
elif module.params['state'] == 'absent' and instance is not None:
instance.delete()
module.exit_json(changed=True, instance=instance._raw_json)
elif module.params['state'] == 'absent' and instance is None:
module.exit_json(changed=False, instance={})
if __name__ == "__main__":
main()
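
And a matching sketch for the instance module, again with an assumed module name (linode_v4) and illustrative region, type and image values; region, image and type must be supplied together per required_together above:

  - name: Create a Linode for Algo (illustrative)
    linode_v4:
      label: algo-linode
      state: present
      access_token: "{{ lookup('env', 'LINODE_ACCESS_TOKEN') }}"
      region: us-east            # assumed region
      image: linode/ubuntu22.04  # assumed image
      type: g6-nanode-1          # assumed plan
      authorized_keys:
        - "{{ lookup('file', SSH_keys.public) }}"
    register: _linode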

@ -9,7 +9,7 @@
- name: Ensure Ansible is not being run in a world writable directory - name: Ensure Ansible is not being run in a world writable directory
assert: assert:
that: _playbook_dir.stat.mode|int <= 0775 that: _playbook_dir.stat.mode|int <= 775
msg: > msg: >
Ansible is being run in a world writable directory ({{ playbook_dir }}), ignoring it as an ansible.cfg source. Ansible is being run in a world writable directory ({{ playbook_dir }}), ignoring it as an ansible.cfg source.
For more information see https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir For more information see https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
@ -23,27 +23,30 @@
- name: Set required ansible version as a fact - name: Set required ansible version as a fact
set_fact: set_fact:
required_ansible_version: required_ansible_version: "{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d+(.\\d+)?)$', '{\"op\": \"\\g<op>\",\"ver\"\
"{{ item | regex_replace('^ansible[\\s+]?(?P<op>[=,>,<]+)[\\s+]?(?P<ver>\\d.\\d(.\\d+)?)$', : \"\\g<ver>\" }') }}"
'{\"op\": \"\\g<op>\",\"ver\": \"\\g<ver>\" }') }}"
when: '"ansible" in item' when: '"ansible" in item'
with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}" with_items: "{{ lookup('file', 'requirements.txt').splitlines() }}"
- name: Just get the list from default pip
community.general.pip_package_info:
register: pip_package_info
- name: Verify Python meets Algo VPN requirements - name: Verify Python meets Algo VPN requirements
assert: assert:
that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string)|float is version('3.6', '>=') that: (ansible_python.version.major|string + '.' + ansible_python.version.minor|string) is version('3.8', '>=')
msg: > msg: >
Python version is not supported. Python version is not supported.
You must upgrade to at least Python 3.6 to use this version of Algo. You must upgrade to at least Python 3.8 to use this version of Algo.
See for more details - https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported See for more details - https://trailofbits.github.io/algo/troubleshooting.html#python-version-is-not-supported
- name: Verify Ansible meets Algo VPN requirements - name: Verify Ansible meets Algo VPN requirements
assert: assert:
that: that:
- ansible_version.full is version(required_ansible_version.ver, required_ansible_version.op) - pip_package_info.packages.pip.ansible.0.version is version(required_ansible_version.ver, required_ansible_version.op)
- not ipaddr.failed - not ipaddr.failed
msg: > msg: >
Ansible version is {{ ansible_version.full }}. Ansible version is {{ pip_package_info.packages.pip.ansible.0.version }}.
You must update the requirements to use this version of Algo. You must update the requirements to use this version of Algo.
Try to run python3 -m pip install -U -r requirements.txt Try to run python3 -m pip install -U -r requirements.txt

@ -10,7 +10,7 @@
ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}" ansible_connection: "{% if cloud_instance_ip == 'localhost' %}local{% else %}ssh{% endif %}"
ansible_ssh_user: "{{ ansible_ssh_user|default('root') }}" ansible_ssh_user: "{{ ansible_ssh_user|default('root') }}"
ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}" ansible_ssh_port: "{{ ansible_ssh_port|default(22) }}"
ansible_python_interpreter: "/usr/bin/python3" ansible_python_interpreter: /usr/bin/python3
algo_provider: "{{ algo_provider }}" algo_provider: "{{ algo_provider }}"
algo_server_name: "{{ algo_server_name }}" algo_server_name: "{{ algo_server_name }}"
algo_ondemand_cellular: "{{ algo_ondemand_cellular }}" algo_ondemand_cellular: "{{ algo_ondemand_cellular }}"
@ -33,7 +33,7 @@
wait_for: wait_for:
port: "{{ ansible_ssh_port|default(22) }}" port: "{{ ansible_ssh_port|default(22) }}"
host: "{{ cloud_instance_ip }}" host: "{{ cloud_instance_ip }}"
search_regex: "OpenSSH" search_regex: OpenSSH
delay: 10 delay: 10
timeout: 320 timeout: 320
state: present state: present
@ -44,8 +44,7 @@
when: when:
- pki_in_tmpfs - pki_in_tmpfs
- not algo_store_pki - not algo_store_pki
- ansible_system == "Darwin" or - ansible_system == "Darwin" or ansible_system == "Linux"
ansible_system == "Linux"
- debug: - debug:
var: IP_subject_alt_name var: IP_subject_alt_name

@ -1,54 +1,53 @@
--- ---
- block: - block:
- name: Display the invocation environment - name: Display the invocation environment
shell: > shell: >
./algo-showenv.sh \ ./algo-showenv.sh \
'algo_provider "{{ algo_provider }}"' \ 'algo_provider "{{ algo_provider }}"' \
{% if ipsec_enabled %} {% if ipsec_enabled %}
'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \ 'algo_ondemand_cellular "{{ algo_ondemand_cellular }}"' \
'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \ 'algo_ondemand_wifi "{{ algo_ondemand_wifi }}"' \
'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \ 'algo_ondemand_wifi_exclude "{{ algo_ondemand_wifi_exclude }}"' \
{% endif %} {% endif %}
'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \ 'algo_dns_adblocking "{{ algo_dns_adblocking }}"' \
'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \ 'algo_ssh_tunneling "{{ algo_ssh_tunneling }}"' \
'wireguard_enabled "{{ wireguard_enabled }}"' \ 'wireguard_enabled "{{ wireguard_enabled }}"' \
'dns_encryption "{{ dns_encryption }}"' \ 'dns_encryption "{{ dns_encryption }}"' \
> /dev/tty > /dev/tty || true
tags: debug tags: debug
- name: Install the requirements - name: Install the requirements
pip: pip:
state: latest state: present
name: name:
- pyOpenSSL - pyOpenSSL>=0.15
- jinja2==2.8 - segno
- segno tags:
tags: - always
- always - skip_ansible_lint
- skip_ansible_lint
delegate_to: localhost delegate_to: localhost
become: false become: false
- block: - block:
- name: Generate the SSH private key - name: Generate the SSH private key
openssl_privatekey: openssl_privatekey:
path: "{{ SSH_keys.private }}" path: "{{ SSH_keys.private }}"
size: 2048 size: 4096
mode: "0600" mode: "0600"
type: RSA type: RSA
- name: Generate the SSH public key - name: Generate the SSH public key
openssl_publickey: openssl_publickey:
path: "{{ SSH_keys.public }}" path: "{{ SSH_keys.public }}"
privatekey_path: "{{ SSH_keys.private }}" privatekey_path: "{{ SSH_keys.private }}"
format: OpenSSH format: OpenSSH
- name: Copy the private SSH key to /tmp - name: Copy the private SSH key to /tmp
copy: copy:
src: "{{ SSH_keys.private }}" src: "{{ SSH_keys.private }}"
dest: "{{ SSH_keys.private_tmp }}" dest: "{{ SSH_keys.private_tmp }}"
force: true force: true
mode: '0600' mode: "0600"
delegate_to: localhost delegate_to: localhost
become: false become: false
when: algo_provider != "local" when: algo_provider != "local"

@ -1,5 +1,5 @@
--- ---
- name: Linux | set OS specific facts - name: Linux | set OS specific facts
set_fact: set_fact:
tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}" tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }}
tmpfs_volume_path: /dev/shm tmpfs_volume_path: /dev/shm

@ -1,7 +1,7 @@
--- ---
- name: MacOS | set OS specific facts - name: MacOS | set OS specific facts
set_fact: set_fact:
tmpfs_volume_name: "AlgoVPN-{{ IP_subject_alt_name }}" tmpfs_volume_name: AlgoVPN-{{ IP_subject_alt_name }}
tmpfs_volume_path: /Volumes tmpfs_volume_path: /Volumes
- name: MacOS | mount a ram disk - name: MacOS | mount a ram disk
@ -9,4 +9,4 @@
/usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" || /usr/sbin/diskutil info "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/" ||
/usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000) /usr/sbin/diskutil erasevolume HFS+ "{{ tmpfs_volume_name }}" $(hdiutil attach -nomount ram://64000)
args: args:
creates: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}" creates: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}

@ -9,7 +9,7 @@
- name: Set config paths as facts - name: Set config paths as facts
set_fact: set_fact:
ipsec_pki_path: "/{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/" ipsec_pki_path: /{{ tmpfs_volume_path }}/{{ tmpfs_volume_name }}/IPsec/
- name: Update config paths - name: Update config paths
add_host: add_host:

@ -1,26 +1,26 @@
--- ---
- name: Linux | Delete the PKI directory - name: Linux | Delete the PKI directory
file: file:
path: "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" path: /{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/
state: absent state: absent
when: facts.ansible_system == "Linux" when: facts.ansible_system == "Linux"
- block: - block:
- name: MacOS | check fs the ramdisk exists - name: MacOS | check fs the ramdisk exists
command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}" command: /usr/sbin/diskutil info "{{ facts.tmpfs_volume_name }}"
ignore_errors: true ignore_errors: true
changed_when: false changed_when: false
register: diskutil_info register: diskutil_info
- name: MacOS | unmount and eject the ram disk - name: MacOS | unmount and eject the ram disk
shell: > shell: >
/usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" && /usr/sbin/diskutil umount force "/{{ facts.tmpfs_volume_path }}/{{ facts.tmpfs_volume_name }}/" &&
/usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}" /usr/sbin/diskutil eject "{{ facts.tmpfs_volume_name }}"
changed_when: false changed_when: false
when: diskutil_info.rc == 0 when: diskutil_info.rc == 0
register: result register: result
until: result.rc == 0 until: result.rc == 0
retries: 5 retries: 5
delay: 3 delay: 3
when: when:
- facts.ansible_system == "Darwin" - facts.ansible_system == "Darwin"

@ -1,3 +1,3 @@
ansible==2.9.22 ansible==9.1.0
netaddr jinja2~=3.0.3
netaddr

@ -1,3 +1,3 @@
--- ---
- name: restart strongswan - name: restart strongswan
service: name=strongswan state=restarted service: name={{ strongswan_service }} state=restarted

@ -1,6 +1,6 @@
---
- name: Gather Facts - name: Gather Facts
setup: setup:
- name: Include system based facts and tasks - name: Include system based facts and tasks
import_tasks: systems/main.yml import_tasks: systems/main.yml
@ -22,9 +22,9 @@
- name: Setup the ipsec config - name: Setup the ipsec config
template: template:
src: "roles/strongswan/templates/client_ipsec.conf.j2" src: roles/strongswan/templates/client_ipsec.conf.j2
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf" dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.conf"
mode: '0644' mode: "0644"
with_items: with_items:
- "{{ vpn_user }}" - "{{ vpn_user }}"
notify: notify:
@ -32,9 +32,9 @@
- name: Setup the ipsec secrets - name: Setup the ipsec secrets
template: template:
src: "roles/strongswan/templates/client_ipsec.secrets.j2" src: roles/strongswan/templates/client_ipsec.secrets.j2
dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets" dest: "{{ configs_prefix }}/ipsec.{{ IP_subject_alt_name }}.secrets"
mode: '0600' mode: "0600"
with_items: with_items:
- "{{ vpn_user }}" - "{{ vpn_user }}"
notify: notify:
@ -44,12 +44,12 @@
lineinfile: lineinfile:
dest: "{{ item.dest }}" dest: "{{ item.dest }}"
line: "{{ item.line }}" line: "{{ item.line }}"
create: yes create: true
with_items: with_items:
- dest: "{{ configs_prefix }}/ipsec.conf" - dest: "{{ configs_prefix }}/ipsec.conf"
line: "include ipsec.{{ IP_subject_alt_name }}.conf" line: include ipsec.{{ IP_subject_alt_name }}.conf
- dest: "{{ configs_prefix }}/ipsec.secrets" - dest: "{{ configs_prefix }}/ipsec.secrets"
line: "include ipsec.{{ IP_subject_alt_name }}.secrets" line: include ipsec.{{ IP_subject_alt_name }}.secrets
notify: notify:
- restart strongswan - restart strongswan
@ -66,11 +66,11 @@
src: "{{ item.src }}" src: "{{ item.src }}"
dest: "{{ item.dest }}" dest: "{{ item.dest }}"
with_items: with_items:
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt" - src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/certs/{{ vpn_user }}.crt
dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt" dest: "{{ configs_prefix }}/ipsec.d/certs/{{ vpn_user }}.crt"
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem" - src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/cacert.pem
dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem" dest: "{{ configs_prefix }}/ipsec.d/cacerts/{{ IP_subject_alt_name }}.pem"
- src: "configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key" - src: configs/{{ IP_subject_alt_name }}/ipsec/.pki/private/{{ vpn_user }}.key
dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key" dest: "{{ configs_prefix }}/ipsec.d/private/{{ vpn_user }}.key"
notify: notify:
- restart strongswan - restart strongswan

@ -1,5 +1,4 @@
--- ---
- include_tasks: Debian.yml - include_tasks: Debian.yml
when: ansible_distribution == 'Debian' when: ansible_distribution == 'Debian'

@ -1,242 +1,210 @@
--- ---
_azure_regions: > # az account list-locations --query 'sort_by([].{name:name,displayName:displayName,regionalDisplayName:regionalDisplayName}, &name)' -o yaml
[ azure_regions:
{ - displayName: Asia
"displayName": "East Asia", name: asia
"latitude": "22.267", regionalDisplayName: Asia
"longitude": "114.188", - displayName: Asia Pacific
"name": "eastasia", name: asiapacific
"subscriptionId": null regionalDisplayName: Asia Pacific
}, - displayName: Australia
{ name: australia
"displayName": "Southeast Asia", regionalDisplayName: Australia
"latitude": "1.283", - displayName: Australia Central
"longitude": "103.833", name: australiacentral
"name": "southeastasia", regionalDisplayName: (Asia Pacific) Australia Central
"subscriptionId": null - displayName: Australia Central 2
}, name: australiacentral2
{ regionalDisplayName: (Asia Pacific) Australia Central 2
"displayName": "Central US", - displayName: Australia East
"latitude": "41.5908", name: australiaeast
"longitude": "-93.6208", regionalDisplayName: (Asia Pacific) Australia East
"name": "centralus", - displayName: Australia Southeast
"subscriptionId": null name: australiasoutheast
}, regionalDisplayName: (Asia Pacific) Australia Southeast
{ - displayName: Brazil
"displayName": "East US", name: brazil
"latitude": "37.3719", regionalDisplayName: Brazil
"longitude": "-79.8164", - displayName: Brazil South
"name": "eastus", name: brazilsouth
"subscriptionId": null regionalDisplayName: (South America) Brazil South
}, - displayName: Brazil Southeast
{ name: brazilsoutheast
"displayName": "East US 2", regionalDisplayName: (South America) Brazil Southeast
"latitude": "36.6681", - displayName: Canada
"longitude": "-78.3889", name: canada
"name": "eastus2", regionalDisplayName: Canada
"subscriptionId": null - displayName: Canada Central
}, name: canadacentral
{ regionalDisplayName: (Canada) Canada Central
"displayName": "West US", - displayName: Canada East
"latitude": "37.783", name: canadaeast
"longitude": "-122.417", regionalDisplayName: (Canada) Canada East
"name": "westus", - displayName: Central India
"subscriptionId": null name: centralindia
}, regionalDisplayName: (Asia Pacific) Central India
{ - displayName: Central US
"displayName": "North Central US", name: centralus
"latitude": "41.8819", regionalDisplayName: (US) Central US
"longitude": "-87.6278", - displayName: Central US EUAP
"name": "northcentralus", name: centraluseuap
"subscriptionId": null regionalDisplayName: (US) Central US EUAP
}, - displayName: Central US (Stage)
{ name: centralusstage
"displayName": "South Central US", regionalDisplayName: (US) Central US (Stage)
"latitude": "29.4167", - displayName: East Asia
"longitude": "-98.5", name: eastasia
"name": "southcentralus", regionalDisplayName: (Asia Pacific) East Asia
"subscriptionId": null - displayName: East Asia (Stage)
}, name: eastasiastage
{ regionalDisplayName: (Asia Pacific) East Asia (Stage)
"displayName": "North Europe", - displayName: East US
"latitude": "53.3478", name: eastus
"longitude": "-6.2597", regionalDisplayName: (US) East US
"name": "northeurope", - displayName: East US 2
"subscriptionId": null name: eastus2
}, regionalDisplayName: (US) East US 2
{ - displayName: East US 2 EUAP
"displayName": "West Europe", name: eastus2euap
"latitude": "52.3667", regionalDisplayName: (US) East US 2 EUAP
"longitude": "4.9", - displayName: East US 2 (Stage)
"name": "westeurope", name: eastus2stage
"subscriptionId": null regionalDisplayName: (US) East US 2 (Stage)
}, - displayName: East US (Stage)
{ name: eastusstage
"displayName": "Japan West", regionalDisplayName: (US) East US (Stage)
"latitude": "34.6939", - displayName: Europe
"longitude": "135.5022", name: europe
"name": "japanwest", regionalDisplayName: Europe
"subscriptionId": null - displayName: France Central
}, name: francecentral
{ regionalDisplayName: (Europe) France Central
"displayName": "Japan East", - displayName: France South
"latitude": "35.68", name: francesouth
"longitude": "139.77", regionalDisplayName: (Europe) France South
"name": "japaneast", - displayName: Germany North
"subscriptionId": null name: germanynorth
}, regionalDisplayName: (Europe) Germany North
{ - displayName: Germany West Central
"displayName": "Brazil South", name: germanywestcentral
"latitude": "-23.55", regionalDisplayName: (Europe) Germany West Central
"longitude": "-46.633", - displayName: Global
"name": "brazilsouth", name: global
"subscriptionId": null regionalDisplayName: Global
}, - displayName: India
{ name: india
"displayName": "Australia East", regionalDisplayName: India
"latitude": "-33.86", - displayName: Japan
"longitude": "151.2094", name: japan
"name": "australiaeast", regionalDisplayName: Japan
"subscriptionId": null - displayName: Japan East
}, name: japaneast
{ regionalDisplayName: (Asia Pacific) Japan East
"displayName": "Australia Southeast", - displayName: Japan West
"latitude": "-37.8136", name: japanwest
"longitude": "144.9631", regionalDisplayName: (Asia Pacific) Japan West
"name": "australiasoutheast", - displayName: Jio India Central
"subscriptionId": null name: jioindiacentral
}, regionalDisplayName: (Asia Pacific) Jio India Central
{ - displayName: Jio India West
"displayName": "South India", name: jioindiawest
"latitude": "12.9822", regionalDisplayName: (Asia Pacific) Jio India West
"longitude": "80.1636", - displayName: Korea Central
"name": "southindia", name: koreacentral
"subscriptionId": null regionalDisplayName: (Asia Pacific) Korea Central
}, - displayName: Korea South
{ name: koreasouth
"displayName": "Central India", regionalDisplayName: (Asia Pacific) Korea South
"latitude": "18.5822", - displayName: North Central US
"longitude": "73.9197", name: northcentralus
"name": "centralindia", regionalDisplayName: (US) North Central US
"subscriptionId": null - displayName: North Central US (Stage)
}, name: northcentralusstage
{ regionalDisplayName: (US) North Central US (Stage)
"displayName": "West India", - displayName: North Europe
"latitude": "19.088", name: northeurope
"longitude": "72.868", regionalDisplayName: (Europe) North Europe
"name": "westindia", - displayName: Norway East
"subscriptionId": null name: norwayeast
}, regionalDisplayName: (Europe) Norway East
{ - displayName: Norway West
"displayName": "Canada Central", name: norwaywest
"latitude": "43.653", regionalDisplayName: (Europe) Norway West
"longitude": "-79.383", - displayName: Qatar Central
"name": "canadacentral", name: qatarcentral
"subscriptionId": null regionalDisplayName: (Europe) Qatar Central
}, - displayName: South Africa North
{ name: southafricanorth
"displayName": "Canada East", regionalDisplayName: (Africa) South Africa North
"latitude": "46.817", - displayName: South Africa West
"longitude": "-71.217", name: southafricawest
"name": "canadaeast", regionalDisplayName: (Africa) South Africa West
"subscriptionId": null - displayName: South Central US
}, name: southcentralus
{ regionalDisplayName: (US) South Central US
"displayName": "UK South", - displayName: South Central US (Stage)
"latitude": "50.941", name: southcentralusstage
"longitude": "-0.799", regionalDisplayName: (US) South Central US (Stage)
"name": "uksouth", - displayName: Southeast Asia
"subscriptionId": null name: southeastasia
}, regionalDisplayName: (Asia Pacific) Southeast Asia
{ - displayName: Southeast Asia (Stage)
"displayName": "UK West", name: southeastasiastage
"latitude": "53.427", regionalDisplayName: (Asia Pacific) Southeast Asia (Stage)
"longitude": "-3.084", - displayName: South India
"name": "ukwest", name: southindia
"subscriptionId": null regionalDisplayName: (Asia Pacific) South India
}, - displayName: Sweden Central
{ name: swedencentral
"displayName": "West Central US", regionalDisplayName: (Europe) Sweden Central
"latitude": "40.890", - displayName: Sweden South
"longitude": "-110.234", name: swedensouth
"name": "westcentralus", regionalDisplayName: (Europe) Sweden South
"subscriptionId": null - displayName: Switzerland North
}, name: switzerlandnorth
{ regionalDisplayName: (Europe) Switzerland North
"displayName": "West US 2", - displayName: Switzerland West
"latitude": "47.233", name: switzerlandwest
"longitude": "-119.852", regionalDisplayName: (Europe) Switzerland West
"name": "westus2", - displayName: UAE Central
"subscriptionId": null name: uaecentral
}, regionalDisplayName: (Middle East) UAE Central
{ - displayName: UAE North
"displayName": "Korea Central", name: uaenorth
"latitude": "37.5665", regionalDisplayName: (Middle East) UAE North
"longitude": "126.9780", - displayName: United Kingdom
"name": "koreacentral", name: uk
"subscriptionId": null regionalDisplayName: United Kingdom
}, - displayName: UK South
{ name: uksouth
"displayName": "Korea South", regionalDisplayName: (Europe) UK South
"latitude": "35.1796", - displayName: UK West
"longitude": "129.0756", name: ukwest
"name": "koreasouth", regionalDisplayName: (Europe) UK West
"subscriptionId": null - displayName: United States
}, name: unitedstates
{ regionalDisplayName: United States
"displayName": "France Central", - displayName: West Central US
"latitude": "46.3772", name: westcentralus
"longitude": "2.3730", regionalDisplayName: (US) West Central US
"name": "francecentral", - displayName: West Europe
"subscriptionId": null name: westeurope
}, regionalDisplayName: (Europe) West Europe
{ - displayName: West India
"displayName": "France South", name: westindia
"latitude": "43.8345", regionalDisplayName: (Asia Pacific) West India
"longitude": "2.1972", - displayName: West US
"name": "francesouth", name: westus
"subscriptionId": null regionalDisplayName: (US) West US
}, - displayName: West US 2
{ name: westus2
"displayName": "Australia Central", regionalDisplayName: (US) West US 2
"latitude": "-35.3075", - displayName: West US 2 (Stage)
"longitude": "149.1244", name: westus2stage
"name": "australiacentral", regionalDisplayName: (US) West US 2 (Stage)
"subscriptionId": null - displayName: West US 3
}, name: westus3
{ regionalDisplayName: (US) West US 3
"displayName": "Australia Central 2", - displayName: West US (Stage)
"latitude": "-35.3075", name: westusstage
"longitude": "149.1244", regionalDisplayName: (US) West US (Stage)
"name": "australiacentral2",
"subscriptionId": null
},
{
"displayName": "UAE Central",
"latitude": "24.466667",
"longitude": "54.366669",
"name": "uaecentral",
"subscriptionId": null
},
{
"displayName": "UAE North",
"latitude": "25.266666",
"longitude": "55.316666",
"name": "uaenorth",
"subscriptionId": null
},
{
"displayName": "South Africa North",
"latitude": "-25.731340",
"longitude": "28.218370",
"name": "southafricanorth",
"subscriptionId": null
},
{
"displayName": "South Africa West",
"latitude": "-34.075691",
"longitude": "18.843266",
"name": "southafricawest",
"subscriptionId": null
}
]

@ -23,6 +23,9 @@
"imageReferenceVersion": { "imageReferenceVersion": {
"type": "string" "type": "string"
}, },
"osDiskType": {
"type": "string"
},
"SshPort": { "SshPort": {
"type": "int" "type": "int"
}, },
@ -197,7 +200,10 @@
"version": "[parameters('imageReferenceVersion')]" "version": "[parameters('imageReferenceVersion')]"
}, },
"osDisk": { "osDisk": {
"createOption": "FromImage" "createOption": "FromImage",
"managedDisk": {
"storageAccountType": "[parameters('osDiskType')]"
}
} }
}, },
"networkProfile": { "networkProfile": {

@ -37,6 +37,8 @@
value: "{{ cloud_providers.azure.image.sku }}" value: "{{ cloud_providers.azure.image.sku }}"
imageReferenceVersion: imageReferenceVersion:
value: "{{ cloud_providers.azure.image.version }}" value: "{{ cloud_providers.azure.image.version }}"
osDiskType:
value: "{{ cloud_providers.azure.osDisk.type }}"
SshPort: SshPort:
value: "{{ ssh_port }}" value: "{{ ssh_port }}"
UserData: UserData:

@ -6,25 +6,21 @@
subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}" subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"
- block: - block:
- name: Set facts about the regions - name: Set the default region
set_fact: set_fact:
azure_regions: "{{ _azure_regions|from_json | sort(attribute='name') }}" default_region: >-
- name: Set the default region
set_fact:
default_region: >-
{% for r in azure_regions %}
{%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
{% for r in azure_regions %} {% for r in azure_regions %}
{{ loop.index }}. {{ r['displayName'] }} {%- if r['name'] == "eastus" %}{{ loop.index }}{% endif %}
{% endfor %} {%- endfor %}
- pause:
prompt: |
What region should the server be located in?
{% for r in azure_regions %}
{{ loop.index }}. {{ r['regionalDisplayName'] }}
{% endfor %}
Enter the number of your desired region Enter the number of your desired region
[{{ default_region }}] [{{ default_region }}]
register: _algo_region register: _algo_region
when: region is undefined when: region is undefined

@ -1,42 +1,6 @@
--- ---
- name: Install requirements - name: Install requirements
pip: pip:
name: requirements: https://raw.githubusercontent.com/ansible-collections/azure/v1.13.0/requirements-azure.txt
- packaging
- requests[security]
- azure-cli-core==2.0.35
- azure-cli-nspkg==3.0.2
- azure-common==1.1.11
- azure-mgmt-authorization==0.51.1
- azure-mgmt-batch==5.0.1
- azure-mgmt-cdn==3.0.0
- azure-mgmt-compute==4.4.0
- azure-mgmt-containerinstance==1.4.0
- azure-mgmt-containerregistry==2.0.0
- azure-mgmt-containerservice==4.4.0
- azure-mgmt-dns==2.1.0
- azure-mgmt-keyvault==1.1.0
- azure-mgmt-marketplaceordering==0.1.0
- azure-mgmt-monitor==0.5.2
- azure-mgmt-network==2.3.0
- azure-mgmt-nspkg==2.0.0
- azure-mgmt-redis==5.0.0
- azure-mgmt-resource==2.1.0
- azure-mgmt-rdbms==1.4.1
- azure-mgmt-servicebus==0.5.3
- azure-mgmt-sql==0.10.0
- azure-mgmt-storage==3.1.0
- azure-mgmt-trafficmanager==0.50.0
- azure-mgmt-web==0.41.0
- azure-nspkg==2.0.0
- azure-storage==0.35.1
- msrest==0.6.1
- msrestazure==0.5.0
- azure-keyvault==1.0.0a1
- azure-graphrbac==0.40.0
- azure-mgmt-cosmosdb==0.5.2
- azure-mgmt-hdinsight==0.1.0
- azure-mgmt-devtestlabs==3.0.0
- azure-mgmt-loganalytics==0.2.0
state: latest state: latest
virtualenv_python: python3 virtualenv_python: python3

@ -26,7 +26,7 @@
end_port: "{{ item.end_port }}" end_port: "{{ item.end_port }}"
cidr: "{{ item.range }}" cidr: "{{ item.range }}"
with_items: with_items:
- { proto: tcp, start_port: '{{ ssh_port }}', end_port: '{{ ssh_port }}', range: 0.0.0.0/0 } - { proto: tcp, start_port: "{{ ssh_port }}", end_port: "{{ ssh_port }}", range: 0.0.0.0/0 }
- { proto: udp, start_port: 4500, end_port: 4500, range: 0.0.0.0/0 } - { proto: udp, start_port: 4500, end_port: 4500, range: 0.0.0.0/0 }
- { proto: udp, start_port: 500, end_port: 500, range: 0.0.0.0/0 } - { proto: udp, start_port: 500, end_port: 500, range: 0.0.0.0/0 }
- { proto: udp, start_port: "{{ wireguard_port }}", end_port: "{{ wireguard_port }}", range: 0.0.0.0/0 } - { proto: udp, start_port: "{{ wireguard_port }}", end_port: "{{ wireguard_port }}", range: 0.0.0.0/0 }
@ -54,5 +54,6 @@
ansible_ssh_port: "{{ ssh_port }}" ansible_ssh_port: "{{ ssh_port }}"
cloudinit: true cloudinit: true
environment: environment:
CLOUDSTACK_CONFIG: "{{ algo_cs_config }}" CLOUDSTACK_KEY: "{{ algo_cs_key }}"
CLOUDSTACK_REGION: "{{ algo_cs_region }}" CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"

@ -1,54 +1,65 @@
--- ---
- block: - block:
- pause: - pause:
prompt: | prompt: |
Enter path for cloudstack.ini file (https://trailofbits.github.io/algo/cloud-cloudstack.html) Enter the API key (https://trailofbits.github.io/algo/cloud-cloudstack.html):
[~/.cloudstack.ini] echo: false
register: _cs_config register: _cs_key
when: when:
- cs_config is undefined - cs_key is undefined
- lookup('env', 'CLOUDSTACK_CONFIG') | length <= 0 - lookup('env','CLOUDSTACK_KEY')|length <= 0
- pause: - pause:
prompt: | prompt: |
Specify region to use in cloudstack.ini file Enter the API secret (https://trailofbits.github.io/algo/cloud-cloudstack.html):
[exoscale] echo: false
register: _cs_region register: _cs_secret
when: when:
- cs_region is undefined - cs_secret is undefined
- lookup('env', 'CLOUDSTACK_REGION') | length <= 0 - lookup('env','CLOUDSTACK_SECRET')|length <= 0
- set_fact: - pause:
algo_cs_config: "{{ cs_config | default(_cs_config.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_CONFIG'), true) | default('~/.cloudstack.ini', true) }}" prompt: |
algo_cs_region: "{{ cs_region | default(_cs_region.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_REGION'), true) | default('exoscale', true) }}" Enter the API endpoint (https://trailofbits.github.io/algo/cloud-cloudstack.html)
[https://api.exoscale.com/compute]
- name: Get zones on cloud register: _cs_url
cloudstack_zones: when:
register: _cs_zones - cs_url is undefined
environment: - lookup('env', 'CLOUDSTACK_ENDPOINT') | length <= 0
CLOUDSTACK_CONFIG: "{{ algo_cs_config }}"
CLOUDSTACK_REGION: "{{ algo_cs_region }}" - set_fact:
algo_cs_key: "{{ cs_key | default(_cs_key.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_KEY'), true) }}"
- name: Extract zones from output algo_cs_token: "{{ cs_secret | default(_cs_secret.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_SECRET'), true) }}"
set_fact: algo_cs_url: "{{ cs_url | default(_cs_url.user_input|default(None)) | default(lookup('env', 'CLOUDSTACK_ENDPOINT'), true) | default('https://api.exoscale.com/compute',\
cs_zones: "{{ _cs_zones['zone'] | sort(attribute='name') }}" \ true) }}"
- name: Set the default zone - name: Get zones on cloud
set_fact: cs_zone_info:
default_zone: >- register: _cs_zones
{% for z in cs_zones %} environment:
{%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %} CLOUDSTACK_KEY: "{{ algo_cs_key }}"
{%- endfor %} CLOUDSTACK_SECRET: "{{ algo_cs_token }}"
CLOUDSTACK_ENDPOINT: "{{ algo_cs_url }}"
- pause:
prompt: | - name: Extract zones from output
What zone should the server be located in? set_fact:
cs_zones: "{{ _cs_zones['zones'] | sort(attribute='name') }}"
- name: Set the default zone
set_fact:
default_zone: >-
{% for z in cs_zones %} {% for z in cs_zones %}
{{ loop.index }}. {{ z['name'] }} {%- if z['name'] == "ch-gva-2" %}{{ loop.index }}{% endif %}
{% endfor %} {%- endfor %}
Enter the number of your desired zone - pause:
[{{ default_zone }}] prompt: |
register: _algo_region What zone should the server be located in?
when: region is undefined {% for z in cs_zones %}
{{ loop.index }}. {{ z['name'] }}
{% endfor %}
Enter the number of your desired zone
[{{ default_zone }}]
register: _algo_region
when: region is undefined

@ -2,14 +2,14 @@
- name: Include prompts - name: Include prompts
import_tasks: prompts.yml import_tasks: prompts.yml
- name: "Upload the SSH key" - name: Upload the SSH key
digital_ocean_sshkey: digital_ocean_sshkey:
oauth_token: "{{ algo_do_token }}" oauth_token: "{{ algo_do_token }}"
name: "{{ SSH_keys.comment }}" name: "{{ SSH_keys.comment }}"
ssh_pub_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}" ssh_pub_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
register: do_ssh_key register: do_ssh_key
- name: "Creating a droplet..." - name: Creating a droplet...
digital_ocean_droplet: digital_ocean_droplet:
state: present state: present
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
@ -26,21 +26,25 @@
- Environment:Algo - Environment:Algo
register: digital_ocean_droplet register: digital_ocean_droplet
# Return data is not idempotent
- set_fact:
droplet: "{{ digital_ocean_droplet.data.droplet | default(digital_ocean_droplet.data) }}"
- block: - block:
- name: "Create a Floating IP" - name: Create a Floating IP
digital_ocean_floating_ip: digital_ocean_floating_ip:
state: present state: present
oauth_token: "{{ algo_do_token }}" oauth_token: "{{ algo_do_token }}"
droplet_id: "{{ digital_ocean_droplet.data.droplet.id }}" droplet_id: "{{ droplet.id }}"
register: digital_ocean_floating_ip register: digital_ocean_floating_ip
- name: Set the static ip as a fact - name: Set the static ip as a fact
set_fact: set_fact:
cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}" cloud_alternative_ingress_ip: "{{ digital_ocean_floating_ip.data.floating_ip.ip }}"
when: alternative_ingress_ip when: alternative_ingress_ip
- set_fact: - set_fact:
cloud_instance_ip: "{{ digital_ocean_droplet.data.ip_address }}" cloud_instance_ip: "{{ (droplet.networks.v4 | selectattr('type', '==', 'public')).0.ip_address }}"
ansible_ssh_user: algo ansible_ssh_user: algo
ansible_ssh_port: "{{ ssh_port }}" ansible_ssh_port: "{{ ssh_port }}"
cloudinit: true cloudinit: true
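The public IPv4 is taken from the droplet's networks list rather than a top-level ip_address field because, as noted above, digital_ocean_droplet does not return an idempotent payload: a freshly created droplet nests its data under data.droplet, while an existing one returns the droplet dict directly, and both shapes expose addresses under networks.v4.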

@ -18,13 +18,13 @@
method: GET method: GET
status_code: 200 status_code: 200
headers: headers:
Content-Type: "application/json" Content-Type: application/json
Authorization: "Bearer {{ algo_do_token }}" Authorization: Bearer {{ algo_do_token }}
register: _do_regions register: _do_regions
- name: Set facts about the regions - name: Set facts about the regions
set_fact: set_fact:
do_regions: "{{ _do_regions.json.regions | sort(attribute='slug') }}" do_regions: "{{ _do_regions.json.regions | selectattr('available', 'true') | sort(attribute='slug') }}"
- name: Set default region - name: Set default region
set_fact: set_fact:

@ -20,9 +20,17 @@ Parameters:
Type: String Type: String
SshPort: SshPort:
Type: String Type: String
InstanceMarketTypeParameter:
Description: Launch a Spot instance or standard on-demand instance
Type: String
Default: on-demand
AllowedValues:
- spot
- on-demand
Conditions: Conditions:
AllocateNewEIP: !Equals [!Ref UseThisElasticIP, ''] AllocateNewEIP: !Equals [!Ref UseThisElasticIP, '']
AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']] AssociateExistingEIP: !Not [!Equals [!Ref UseThisElasticIP, '']]
InstanceIsSpot: !Equals [spot, !Ref InstanceMarketTypeParameter]
Resources: Resources:
VPC: VPC:
Type: AWS::EC2::VPC Type: AWS::EC2::VPC
@ -146,6 +154,15 @@ Resources:
- Key: Name - Key: Name
Value: !Ref AWS::StackName Value: !Ref AWS::StackName
EC2LaunchTemplate:
Type: AWS::EC2::LaunchTemplate
Condition: InstanceIsSpot # Only create this template if requested
Properties: # a spot instance_market_type in config.cfg
LaunchTemplateName: !Ref AWS::StackName
LaunchTemplateData:
InstanceMarketOptions:
MarketType: spot
EC2Instance: EC2Instance:
Type: AWS::EC2::Instance Type: AWS::EC2::Instance
DependsOn: DependsOn:
@ -169,6 +186,14 @@ Resources:
SubnetId: !Ref Subnet SubnetId: !Ref Subnet
Ipv6AddressCount: 1 Ipv6AddressCount: 1
UserData: !Ref UserData UserData: !Ref UserData
LaunchTemplate:
!If # Only if Conditions created "EC2LaunchTemplate"
- InstanceIsSpot
-
LaunchTemplateId:
!Ref EC2LaunchTemplate
Version: 1
- !Ref AWS::NoValue # Else this LaunchTemplate not set
Tags: Tags:
- Key: Name - Key: Name
Value: !Ref AWS::StackName Value: !Ref AWS::StackName
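Taken together: when config.cfg sets instance_market_type to spot, InstanceMarketTypeParameter resolves to spot, the InstanceIsSpot condition is true, CloudFormation creates the EC2LaunchTemplate with MarketType: spot, and the instance references it through the !If; for on-demand the !If falls through to AWS::NoValue and the LaunchTemplate property is omitted entirely.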

@ -4,7 +4,7 @@
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
stack_name: "{{ stack_name }}" stack_name: "{{ stack_name }}"
state: "present" state: present
region: "{{ algo_region }}" region: "{{ algo_region }}"
template: roles/cloud-ec2/files/stack.yaml template: roles/cloud-ec2/files/stack.yaml
template_parameters: template_parameters:
@ -16,6 +16,7 @@
EbsEncrypted: "{{ encrypted }}" EbsEncrypted: "{{ encrypted }}"
UserData: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}" UserData: "{{ lookup('template', 'files/cloud-init/base.yml') | b64encode }}"
SshPort: "{{ ssh_port }}" SshPort: "{{ ssh_port }}"
InstanceMarketTypeParameter: "{{ cloud_providers.ec2.instance_market_type }}"
tags: tags:
Environment: Algo Environment: Algo
register: stack register: stack

@ -6,13 +6,14 @@
import_tasks: prompts.yml import_tasks: prompts.yml
- name: Locate official AMI for region - name: Locate official AMI for region
ec2_ami_facts: ec2_ami_info:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
owners: "{{ cloud_providers.ec2.image.owner }}" owners: "{{ cloud_providers.ec2.image.owner }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
filters: filters:
name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*" architecture: "{{ cloud_providers.ec2.image.arch }}"
name: ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-*64-server-*
register: ami_search register: ami_search
- name: Set the ami id as a fact - name: Set the ami id as a fact

@ -6,8 +6,8 @@
echo: false echo: false
register: _aws_access_key register: _aws_access_key
when: when:
- aws_access_key is undefined - aws_access_key is undefined
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
- pause: - pause:
prompt: | prompt: |
@ -23,35 +23,35 @@
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}" secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
- block: - block:
- name: Get regions - name: Get regions
aws_region_facts: aws_region_info:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
region: us-east-1 region: us-east-1
register: _aws_regions register: _aws_regions
- name: Set facts about the regions - name: Set facts about the regions
set_fact: set_fact:
aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}" aws_regions: "{{ _aws_regions.regions | sort(attribute='region_name') }}"
- name: Set the default region - name: Set the default region
set_fact: set_fact:
default_region: >- default_region: >-
{% for r in aws_regions %}
{%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
{% for r in aws_regions %} {% for r in aws_regions %}
{{ loop.index }}. {{ r['region_name'] }} {%- if r['region_name'] == "us-east-1" %}{{ loop.index }}{% endif %}
{% endfor %} {%- endfor %}
- pause:
prompt: |
What region should the server be located in?
(https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
{% for r in aws_regions %}
{{ loop.index }}. {{ r['region_name'] }}
{% endfor %}
Enter the number of your desired region Enter the number of your desired region
[{{ default_region }}] [{{ default_region }}]
register: _algo_region register: _algo_region
when: region is undefined when: region is undefined
- name: Set algo_region and stack_name facts - name: Set algo_region and stack_name facts
@ -63,26 +63,26 @@
stack_name: "{{ algo_server_name | replace('.', '-') }}" stack_name: "{{ algo_server_name | replace('.', '-') }}"
- block: - block:
- name: Get existing available Elastic IPs - name: Get existing available Elastic IPs
ec2_eip_facts: ec2_eip_info:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
register: raw_eip_addresses register: raw_eip_addresses
- set_fact: - set_fact:
available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}" available_eip_addresses: "{{ raw_eip_addresses.addresses | selectattr('association_id', 'undefined') | list }}"
- pause: - pause:
prompt: >- prompt: >-
What Elastic IP would you like to use? What Elastic IP would you like to use?
{% for eip in available_eip_addresses %} {% for eip in available_eip_addresses %}
{{ loop.index }}. {{ eip['public_ip'] }} {{ loop.index }}. {{ eip['public_ip'] }}
{% endfor %} {% endfor %}
Enter the number of your desired Elastic IP Enter the number of your desired Elastic IP
register: _use_existing_eip register: _use_existing_eip
- set_fact: - set_fact:
existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}" existing_eip: "{{ available_eip_addresses[_use_existing_eip.user_input | int -1 ]['allocation_id'] }}"
when: cloud_providers.ec2.use_existing_eip when: cloud_providers.ec2.use_existing_eip

@ -27,27 +27,27 @@
allowed: allowed:
- ip_protocol: udp - ip_protocol: udp
ports: ports:
- '500' - "500"
- '4500' - "4500"
- '{{ wireguard_port|string }}' - "{{ wireguard_port|string }}"
- ip_protocol: tcp - ip_protocol: tcp
ports: ports:
- '{{ ssh_port }}' - "{{ ssh_port }}"
- ip_protocol: icmp - ip_protocol: icmp
- block: - block:
- name: External IP allocated - name: External IP allocated
gcp_compute_address: gcp_compute_address:
auth_kind: serviceaccount auth_kind: serviceaccount
service_account_file: "{{ credentials_file_path }}" service_account_file: "{{ credentials_file_path }}"
project: "{{ project_id }}" project: "{{ project_id }}"
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
register: gcp_compute_address register: gcp_compute_address
- name: Set External IP as a fact - name: Set External IP as a fact
set_fact: set_fact:
external_ip: "{{ gcp_compute_address.address }}" external_ip: "{{ gcp_compute_address.address }}"
when: cloud_providers.gce.external_static_ip when: cloud_providers.gce.external_static_ip
- name: Instance created - name: Instance created
@ -62,9 +62,9 @@
- auto_delete: true - auto_delete: true
boot: true boot: true
initialize_params: initialize_params:
source_image: "projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }}" source_image: projects/ubuntu-os-cloud/global/images/family/{{ cloud_providers.gce.image }}
metadata: metadata:
ssh-keys: "algo:{{ ssh_public_key_lookup }}" ssh-keys: algo:{{ ssh_public_key_lookup }}
user-data: "{{ lookup('template', 'files/cloud-init/base.yml') }}" user-data: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
network_interfaces: network_interfaces:
- network: "{{ gcp_compute_network }}" - network: "{{ gcp_compute_network }}"
@ -74,7 +74,7 @@
type: ONE_TO_ONE_NAT type: ONE_TO_ONE_NAT
tags: tags:
items: items:
- "environment-algo" - environment-algo
register: gcp_compute_instance register: gcp_compute_instance
- set_fact: - set_fact:

@ -9,7 +9,8 @@
- lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0 - lookup('env','GCE_CREDENTIALS_FILE_PATH')|length <= 0
- set_fact: - set_fact:
credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}" credentials_file_path: "{{ gce_credentials_file | default(_gce_credentials_file.user_input|default(None)) | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'),\
\ true) }}"
ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}" ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"
- set_fact: - set_fact:
@ -20,40 +21,40 @@
project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}" project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"
- block: - block:
- name: Get regions - name: Get regions
gcp_compute_location_info: gcp_compute_location_info:
auth_kind: serviceaccount auth_kind: serviceaccount
service_account_file: "{{ credentials_file_path }}" service_account_file: "{{ credentials_file_path }}"
project: "{{ project_id }}" project: "{{ project_id }}"
scope: regions scope: regions
filters: status=UP filters: status=UP
register: gcp_compute_regions_info register: gcp_compute_regions_info
- name: Set facts about the regions - name: Set facts about the regions
set_fact: set_fact:
gce_regions: >- gce_regions: >-
[{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%} [{%- for region in gcp_compute_regions_info.resources | sort(attribute='name') -%}
'{{ region.name }}'{% if not loop.last %},{% endif %} '{{ region.name }}'{% if not loop.last %},{% endif %}
{%- endfor -%}] {%- endfor -%}]
- name: Set facts about the default region - name: Set facts about the default region
set_fact: set_fact:
default_region: >- default_region: >-
{% for region in gce_regions %} {% for region in gce_regions %}
{%- if region == "us-east1" %}{{ loop.index }}{% endif %} {%- if region == "us-east1" %}{{ loop.index }}{% endif %}
{%- endfor %} {%- endfor %}
- pause: - pause:
prompt: | prompt: |
What region should the server be located in? What region should the server be located in?
(https://cloud.google.com/compute/docs/regions-zones/#locations) (https://cloud.google.com/compute/docs/regions-zones/#locations)
{% for r in gce_regions %} {% for r in gce_regions %}
{{ loop.index }}. {{ r }} {{ loop.index }}. {{ r }}
{% endfor %} {% endfor %}
Enter the number of your desired region Enter the number of your desired region
[{{ default_region }}] [{{ default_region }}]
register: _gce_region register: _gce_region
when: region is undefined when: region is undefined
- name: Set region as a fact - name: Set region as a fact
@ -70,8 +71,8 @@
project: "{{ project_id }}" project: "{{ project_id }}"
scope: zones scope: zones
filters: filters:
- "name={{ algo_region }}-*" - name={{ algo_region }}-*
- "status=UP" - status=UP
register: gcp_compute_zone_info register: gcp_compute_zone_info
- name: Set random available zone as a fact - name: Set random available zone as a fact
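The prompts in this file only fire when the matching variables are undefined, so a fully non-interactive GCE deploy can pre-seed them instead. A minimal extra-vars sketch, using only variable names that appear in the tasks above; the path and region values are placeholders:

# hypothetical extra-vars file, e.g. passed with `ansible-playbook main.yml -e @gce-vars.yml`
gce_credentials_file: /path/to/service-account.json
region: us-east1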

@ -6,15 +6,15 @@
import_tasks: prompts.yml import_tasks: prompts.yml
- name: Create an ssh key - name: Create an ssh key
hcloud_ssh_key: hetzner.hcloud.ssh_key:
name: "algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}" name: algo-{{ 999999 | random(seed=lookup('file', SSH_keys.public)) }}
public_key: "{{ lookup('file', SSH_keys.public) }}" public_key: "{{ lookup('file', SSH_keys.public) }}"
state: present state: present
api_token: "{{ algo_hcloud_token }}" api_token: "{{ algo_hcloud_token }}"
register: hcloud_ssh_key register: hcloud_ssh_key
- name: Create a server... - name: Create a server...
hcloud_server: hetzner.hcloud.server:
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
location: "{{ algo_hcloud_region }}" location: "{{ algo_hcloud_region }}"
server_type: "{{ cloud_providers.hetzner.server_type }}" server_type: "{{ cloud_providers.hetzner.server_type }}"

@ -13,13 +13,13 @@
algo_hcloud_token: "{{ hcloud_token | default(_hcloud_token.user_input|default(None)) | default(lookup('env','HCLOUD_TOKEN'), true) }}" algo_hcloud_token: "{{ hcloud_token | default(_hcloud_token.user_input|default(None)) | default(lookup('env','HCLOUD_TOKEN'), true) }}"
- name: Get regions - name: Get regions
hcloud_datacenter_facts: hetzner.hcloud.datacenter_info:
api_token: "{{ algo_hcloud_token }}" api_token: "{{ algo_hcloud_token }}"
register: _hcloud_regions register: _hcloud_regions
- name: Set facts about thre regions - name: Set facts about the regions
set_fact: set_fact:
hcloud_regions: "{{ hcloud_datacenter_facts | sort(attribute='location') }}" hcloud_regions: "{{ _hcloud_regions.hcloud_datacenter_info | sort(attribute='location') }}"
- name: Set default region - name: Set default region
set_fact: set_fact:

@ -0,0 +1,73 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Algo VPN stack (LightSail)'
Parameters:
InstanceTypeParameter:
Type: String
Default: 'nano_2_0'
ImageIdParameter:
Type: String
Default: 'ubuntu_20_04'
WireGuardPort:
Type: String
Default: '51820'
SshPort:
Type: String
Default: '4160'
UserData:
Type: String
Default: 'true'
Resources:
Instance:
Type: AWS::Lightsail::Instance
Properties:
BlueprintId:
Ref: ImageIdParameter
BundleId:
Ref: InstanceTypeParameter
InstanceName: !Ref AWS::StackName
Networking:
Ports:
- AccessDirection: inbound
Cidrs: ['0.0.0.0/0']
Ipv6Cidrs: ['::/0']
CommonName: SSH
FromPort: !Ref SshPort
ToPort: !Ref SshPort
Protocol: tcp
- AccessDirection: inbound
Cidrs: ['0.0.0.0/0']
Ipv6Cidrs: ['::/0']
CommonName: WireGuard
FromPort: !Ref WireGuardPort
ToPort: !Ref WireGuardPort
Protocol: udp
- AccessDirection: inbound
Cidrs: ['0.0.0.0/0']
Ipv6Cidrs: ['::/0']
CommonName: IPSec-4500
FromPort: 4500
ToPort: 4500
Protocol: udp
- AccessDirection: inbound
Cidrs: ['0.0.0.0/0']
Ipv6Cidrs: ['::/0']
CommonName: IPSec-500
FromPort: 500
ToPort: 500
Protocol: udp
Tags:
- Key: Name
Value: !Ref AWS::StackName
UserData: !Ref UserData
StaticIP:
Type: AWS::Lightsail::StaticIp
Properties:
AttachedTo: !Ref Instance
StaticIpName: !Join [ "-", [ !Ref AWS::StackName, "ip" ] ]
DependsOn:
- Instance
Outputs:
IpAddress:
Value: !GetAtt [StaticIP, IpAddress]
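Because the Lightsail firewall and user-data wiring now live in this template, a quick syntax check before a deploy can catch mistakes early. A sketch of such a pre-flight task (not part of the role), assuming the AWS CLI is installed and credentialed on the control machine:

- name: Validate the Lightsail template locally (optional sketch)
  command: >-
    aws cloudformation validate-template
    --template-body file://roles/cloud-lightsail/files/stack.yaml
  changed_when: false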

@ -0,0 +1,19 @@
---
- name: Deploy the template
cloudformation:
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
stack_name: "{{ stack_name }}"
state: present
region: "{{ algo_region }}"
template: roles/cloud-lightsail/files/stack.yaml
template_parameters:
InstanceTypeParameter: "{{ cloud_providers.lightsail.size }}"
ImageIdParameter: "{{ cloud_providers.lightsail.image }}"
WireGuardPort: "{{ wireguard_port }}"
SshPort: "{{ ssh_port }}"
UserData: "{{ lookup('template', 'files/cloud-init/base.sh') }}"
tags:
Environment: Algo
Lightsail: true
register: stack

@ -5,36 +5,11 @@
- name: Include prompts - name: Include prompts
import_tasks: prompts.yml import_tasks: prompts.yml
- name: Create an instance - name: Deploy the stack
lightsail: import_tasks: cloudformation.yml
aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}"
name: "{{ algo_server_name }}"
state: present
region: "{{ algo_region }}"
zone: "{{ algo_region }}a"
blueprint_id: "{{ cloud_providers.lightsail.image }}"
bundle_id: "{{ cloud_providers.lightsail.size }}"
wait_timeout: "300"
open_ports:
- from_port: "{{ ssh_port }}"
to_port: "{{ ssh_port }}"
protocol: tcp
- from_port: 4500
to_port: 4500
protocol: udp
- from_port: 500
to_port: 500
protocol: udp
- from_port: "{{ wireguard_port }}"
to_port: "{{ wireguard_port }}"
protocol: udp
user_data: |
{{ lookup('template', 'files/cloud-init/base.sh') }}
register: algo_instance
- set_fact: - set_fact:
cloud_instance_ip: "{{ algo_instance['instance']['public_ip_address'] }}" cloud_instance_ip: "{{ stack.stack_outputs.IpAddress }}"
ansible_ssh_user: algo ansible_ssh_user: algo
ansible_ssh_port: "{{ ssh_port }}" ansible_ssh_port: "{{ ssh_port }}"
cloudinit: true cloudinit: true
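The instance address now comes from the stack's IpAddress output rather than the lightsail module's return value. If that output ever needs to be read back outside this play, a hedged sketch with amazon.aws.cloudformation_info, reusing the stack_name, algo_region, access_key and secret_key facts set in prompts.yml, might look like:

- amazon.aws.cloudformation_info:
    stack_name: "{{ stack_name }}"
    region: "{{ algo_region }}"
    aws_access_key: "{{ access_key }}"
    aws_secret_key: "{{ secret_key }}"
  register: _stack_info
- debug:
    msg: "{{ _stack_info.cloudformation[stack_name].stack_outputs.IpAddress }}"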

@ -6,8 +6,8 @@
echo: false echo: false
register: _aws_access_key register: _aws_access_key
when: when:
- aws_access_key is undefined - aws_access_key is undefined
- lookup('env','AWS_ACCESS_KEY_ID')|length <= 0 - lookup('env','AWS_ACCESS_KEY_ID')|length <= 0
- pause: - pause:
prompt: | prompt: |
@ -23,38 +23,39 @@
secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}" secret_key: "{{ aws_secret_key | default(_aws_secret_key.user_input|default(None)) | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
- block: - block:
- name: Get regions - name: Get regions
lightsail_region_facts: lightsail_region_facts:
aws_access_key: "{{ access_key }}" aws_access_key: "{{ access_key }}"
aws_secret_key: "{{ secret_key }}" aws_secret_key: "{{ secret_key }}"
region: us-east-1 region: us-east-1
register: _lightsail_regions register: _lightsail_regions
- name: Set facts about the regions - name: Set facts about the regions
set_fact: set_fact:
lightsail_regions: "{{ _lightsail_regions.data.regions | sort(attribute='name') }}" lightsail_regions: "{{ _lightsail_regions.data.regions | sort(attribute='name') }}"
- name: Set the default region - name: Set the default region
set_fact: set_fact:
default_region: >- default_region: >-
{% for r in lightsail_regions %}
{%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
(https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)
{% for r in lightsail_regions %} {% for r in lightsail_regions %}
{{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }} {%- if r['name'] == "us-east-1" %}{{ loop.index }}{% endif %}
{% endfor %} {%- endfor %}
Enter the number of your desired region - pause:
[{{ default_region }}] prompt: |
register: _algo_region What region should the server be located in?
(https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)
{% for r in lightsail_regions %}
{{ (loop.index|string + '.').ljust(3) }} {{ r['name'].ljust(20) }} {{ r['displayName'] }}
{% endfor %}
Enter the number of your desired region
[{{ default_region }}]
register: _algo_region
when: region is undefined when: region is undefined
- set_fact: - set_fact:
stack_name: "{{ algo_server_name | replace('.', '-') }}"
algo_region: >- algo_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }} {%- elif _algo_region.user_input %}{{ lightsail_regions[_algo_region.user_input | int -1 ]['name'] }}

@ -0,0 +1,2 @@
---
linode_venv: "{{ playbook_dir }}/configs/.venvs/linode"

@ -0,0 +1,56 @@
---
- name: Build python virtual environment
import_tasks: venv.yml
- name: Include prompts
import_tasks: prompts.yml
- name: Set facts
set_fact:
stackscript: |
{{ lookup('template', 'files/cloud-init/base.sh') }}
mkdir -p /var/lib/cloud/data/ || true
touch /var/lib/cloud/data/result.json
- name: Create a stackscript
linode_stackscript_v4:
access_token: "{{ algo_linode_token }}"
label: "{{ algo_server_name }}"
state: present
description: Environment:Algo
images:
- "{{ cloud_providers.linode.image }}"
script: |
{{ stackscript }}
register: _linode_stackscript
- name: Update the stackscript
uri:
url: https://api.linode.com/v4/linode/stackscripts/{{ _linode_stackscript.stackscript.id }}
method: PUT
body_format: json
body:
script: |
{{ stackscript }}
headers:
Content-Type: application/json
Authorization: Bearer {{ algo_linode_token }}
when: (_linode_stackscript.stackscript.script | hash('md5')) != (stackscript | hash('md5'))
- name: Creating an instance...
linode_v4:
access_token: "{{ algo_linode_token }}"
label: "{{ algo_server_name }}"
state: present
region: "{{ algo_linode_region }}"
image: "{{ cloud_providers.linode.image }}"
type: "{{ cloud_providers.linode.type }}"
authorized_keys: "{{ public_key }}"
stackscript_id: "{{ _linode_stackscript.stackscript.id }}"
register: _linode
- set_fact:
cloud_instance_ip: "{{ _linode.instance.ipv4[0] }}"
ansible_ssh_user: algo
ansible_ssh_port: "{{ ssh_port }}"
cloudinit: true
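The "Update the stackscript" task above only issues its PUT when the MD5 of the rendered script differs from what Linode already stores, which keeps re-runs idempotent. A standalone illustration of that same guard, using the variables registered above (purely a sketch, not part of the role):

- debug:
    msg: >-
      stackscript drifted:
      {{ (_linode_stackscript.stackscript.script | hash('md5')) != (stackscript | hash('md5')) }}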

@ -0,0 +1,51 @@
---
- pause:
prompt: |
Enter your ACCESS token. (https://developers.linode.com/api/v4/#access-and-authentication):
echo: false
register: _linode_token
when:
- linode_token is undefined
- lookup('env','LINODE_API_TOKEN')|length <= 0
- name: Set the token as a fact
set_fact:
algo_linode_token: "{{ linode_token | default(_linode_token.user_input|default(None)) | default(lookup('env','LINODE_API_TOKEN'), true) }}"
- name: Get regions
uri:
url: https://api.linode.com/v4/regions
method: GET
status_code: 200
register: _linode_regions
- name: Set facts about the regions
set_fact:
linode_regions: "{{ _linode_regions.json.data | sort(attribute='id') }}"
- name: Set default region
set_fact:
default_region: >-
{% for r in linode_regions %}
{%- if r['id'] == "us-east" %}{{ loop.index }}{% endif %}
{%- endfor %}
- pause:
prompt: |
What region should the server be located in?
{% for r in linode_regions %}
{{ loop.index }}. {{ r['id'] }}
{% endfor %}
Enter the number of your desired region
[{{ default_region }}]
register: _algo_region
when: region is undefined
- name: Set additional facts
set_fact:
algo_linode_region: >-
{% if region is defined %}{{ region }}
{%- elif _algo_region.user_input %}{{ linode_regions[_algo_region.user_input | int -1 ]['id'] }}
{%- else %}{{ linode_regions[default_region | int - 1]['id'] }}{% endif %}
public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"

@ -0,0 +1,7 @@
---
- name: Install requirements
pip:
name:
- linode_api4
state: latest
virtualenv_python: python3

@ -7,14 +7,14 @@
import_tasks: venv.yml import_tasks: venv.yml
- name: Security group created - name: Security group created
os_security_group: openstack.cloud.security_group:
state: "{{ state|default('present') }}" state: "{{ state|default('present') }}"
name: "{{ algo_server_name }}-security_group" name: "{{ algo_server_name }}-security_group"
description: AlgoVPN security group description: AlgoVPN security group
register: os_security_group register: os_security_group
- name: Security rules created - name: Security rules created
os_security_group_rule: openstack.cloud.security_group_rule:
state: "{{ state|default('present') }}" state: "{{ state|default('present') }}"
security_group: "{{ os_security_group.id }}" security_group: "{{ os_security_group.id }}"
protocol: "{{ item.proto }}" protocol: "{{ item.proto }}"
@ -22,29 +22,32 @@
port_range_max: "{{ item.port_max }}" port_range_max: "{{ item.port_max }}"
remote_ip_prefix: "{{ item.range }}" remote_ip_prefix: "{{ item.range }}"
with_items: with_items:
- { proto: tcp, port_min: '{{ ssh_port }}', port_max: '{{ ssh_port }}', range: 0.0.0.0/0 } - { proto: tcp, port_min: "{{ ssh_port }}", port_max: "{{ ssh_port }}", range: 0.0.0.0/0 }
- { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 } - { proto: icmp, port_min: -1, port_max: -1, range: 0.0.0.0/0 }
- { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 } - { proto: udp, port_min: 4500, port_max: 4500, range: 0.0.0.0/0 }
- { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 } - { proto: udp, port_min: 500, port_max: 500, range: 0.0.0.0/0 }
- { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 } - { proto: udp, port_min: "{{ wireguard_port }}", port_max: "{{ wireguard_port }}", range: 0.0.0.0/0 }
- name: Gather facts about flavors - name: Gather facts about flavors
os_flavor_facts: openstack.cloud.compute_flavor_info:
ram: "{{ cloud_providers.openstack.flavor_ram }}" ram: "{{ cloud_providers.openstack.flavor_ram }}"
register: os_flavor
- name: Gather facts about images - name: Gather facts about images
os_image_facts: openstack.cloud.image_info:
register: os_image
- name: Set image as a fact - name: Set image as a fact
set_fact: set_fact:
image_id: "{{ item.id }}" image_id: "{{ item.id }}"
loop: "{{ openstack_image }}" loop: "{{ os_image.openstack_image }}"
when: when:
- item.name == cloud_providers.openstack.image - item.name == cloud_providers.openstack.image
- item.status == "active" - item.status == "active"
- name: Gather facts about public networks - name: Gather facts about public networks
os_networks_facts: openstack.cloud.networks_info:
register: os_network
- name: Set the network as a fact - name: Set the network as a fact
set_fact: set_fact:
@ -53,15 +56,15 @@
- item['router:external']|default(omit) - item['router:external']|default(omit)
- item['admin_state_up']|default(omit) - item['admin_state_up']|default(omit)
- item['status'] == 'ACTIVE' - item['status'] == 'ACTIVE'
with_items: "{{ openstack_networks }}" with_items: "{{ os_network.openstack_networks }}"
- name: Set facts - name: Set facts
set_fact: set_fact:
flavor_id: "{{ (openstack_flavors | sort(attribute='ram'))[0]['id'] }}" flavor_id: "{{ (os_flavor.openstack_flavors | sort(attribute='ram'))[0]['id'] }}"
security_group_name: "{{ os_security_group['secgroup']['name'] }}" security_group_name: "{{ os_security_group['secgroup']['name'] }}"
- name: Server created - name: Server created
os_server: openstack.cloud.server:
state: "{{ state|default('present') }}" state: "{{ state|default('present') }}"
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
image: "{{ image_id }}" image: "{{ image_id }}"

@ -1,71 +1,74 @@
---
- name: Include prompts - name: Include prompts
import_tasks: prompts.yml import_tasks: prompts.yml
- block: - block:
- name: Gather Scaleway organizations facts - name: Gather Scaleway organizations facts
scaleway_organization_facts: scaleway_organization_info:
register: scaleway_org
- name: Get images - name: Get images
scaleway_image_facts: scaleway_image_info:
region: "{{ algo_region }}" region: "{{ algo_region }}"
register: scaleway_image
- name: Set cloud specific facts - name: Set cloud specific facts
set_fact: set_fact:
organization_id: "{{ scaleway_organization_facts[0]['id'] }}" organization_id: "{{ scaleway_org.scaleway_organization_info[0]['id'] }}"
images: >- images: >-
[{% for i in scaleway_image_facts -%} [{% for i in scaleway_image.scaleway_image_info -%}
{% if i.name == cloud_providers.scaleway.image and {% if i.name == cloud_providers.scaleway.image and
i.arch == cloud_providers.scaleway.arch -%} i.arch == cloud_providers.scaleway.arch -%}
'{{ i.id }}'{% if not loop.last %},{% endif %} '{{ i.id }}'{% if not loop.last %},{% endif %}
{%- endif -%} {%- endif -%}
{%- endfor -%}] {%- endfor -%}]
- name: Create a server - name: Create a server
scaleway_compute: scaleway_compute:
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
enable_ipv6: true enable_ipv6: true
public_ip: dynamic public_ip: dynamic
boot_type: local boot_type: local
state: present state: present
image: "{{ images[0] }}" image: "{{ images[0] }}"
organization: "{{ organization_id }}" organization: "{{ organization_id }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
commercial_type: "{{ cloud_providers.scaleway.size }}" commercial_type: "{{ cloud_providers.scaleway.size }}"
wait: true wait: true
tags: tags:
- Environment:Algo - Environment:Algo
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
register: scaleway_compute register: scaleway_compute
- name: Patch the cloud-init - name: Patch the cloud-init
uri: uri:
url: "https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init" url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init
method: PATCH method: PATCH
body: "{{ lookup('template', 'files/cloud-init/base.yml') }}" body: "{{ lookup('template', 'files/cloud-init/base.yml') }}"
status_code: 204 status_code: 204
headers: headers:
Content-Type: "text/plain" Content-Type: text/plain
X-Auth-Token: "{{ algo_scaleway_token }}" X-Auth-Token: "{{ algo_scaleway_token }}"
- name: Start the server - name: Start the server
scaleway_compute: scaleway_compute:
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
enable_ipv6: true enable_ipv6: true
public_ip: dynamic public_ip: dynamic
boot_type: local boot_type: local
state: running state: running
image: "{{ images[0] }}" image: "{{ images[0] }}"
organization: "{{ organization_id }}" organization: "{{ organization_id }}"
region: "{{ algo_region }}" region: "{{ algo_region }}"
commercial_type: "{{ cloud_providers.scaleway.size }}" commercial_type: "{{ cloud_providers.scaleway.size }}"
wait: true wait: true
tags: tags:
- Environment:Algo - Environment:Algo
- AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }} - AUTHORIZED_KEY={{ lookup('file', SSH_keys.public)|regex_replace(' ', '_') }}
register: algo_instance register: algo_instance
until: algo_instance.msg.public_ip until: algo_instance.msg.public_ip
retries: 3 retries: 3
delay: 3 delay: 3
environment: environment:
SCW_TOKEN: "{{ algo_scaleway_token }}" SCW_TOKEN: "{{ algo_scaleway_token }}"
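The cloud-init payload is pushed through Scaleway's user_data endpoint with a PATCH. The same endpoint can be read back to confirm the payload landed; a sketch of such a check (not part of the role), reusing the URL, server id and token from the tasks above:

- uri:
    url: https://cp-{{ algo_region }}.scaleway.com/servers/{{ scaleway_compute.msg.id }}/user_data/cloud-init
    method: GET
    return_content: true
    headers:
      X-Auth-Token: "{{ algo_scaleway_token }}"
  register: _scaleway_user_data_check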

@ -3,56 +3,56 @@
import_tasks: prompts.yml import_tasks: prompts.yml
- block: - block:
- name: Creating a firewall group - name: Creating a firewall group
vultr_firewall_group: vultr.cloud.firewall_group:
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
- name: Creating firewall rules - name: Creating firewall rules
vultr_firewall_rule: vultr.cloud.firewall_rule:
group: "{{ algo_server_name }}" group: "{{ algo_server_name }}"
protocol: "{{ item.protocol }}" protocol: "{{ item.protocol }}"
port: "{{ item.port }}" port: "{{ item.port }}"
ip_version: "{{ item.ip }}" ip_type: "{{ item.ip }}"
cidr: "{{ item.cidr }}" subnet: "{{ item.cidr.split('/')[0] }}"
with_items: subnet_size: "{{ item.cidr.split('/')[1] }}"
- { protocol: tcp, port: "{{ ssh_port }}", ip: v4, cidr: "0.0.0.0/0" } with_items:
- { protocol: tcp, port: "{{ ssh_port }}", ip: v6, cidr: "::/0" } - { protocol: tcp, port: "{{ ssh_port }}", ip: v4, cidr: 0.0.0.0/0 }
- { protocol: udp, port: 500, ip: v4, cidr: "0.0.0.0/0" } - { protocol: tcp, port: "{{ ssh_port }}", ip: v6, cidr: "::/0" }
- { protocol: udp, port: 500, ip: v6, cidr: "::/0" } - { protocol: udp, port: 500, ip: v4, cidr: 0.0.0.0/0 }
- { protocol: udp, port: 4500, ip: v4, cidr: "0.0.0.0/0" } - { protocol: udp, port: 500, ip: v6, cidr: "::/0" }
- { protocol: udp, port: 4500, ip: v6, cidr: "::/0" } - { protocol: udp, port: 4500, ip: v4, cidr: 0.0.0.0/0 }
- { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: "0.0.0.0/0" } - { protocol: udp, port: 4500, ip: v6, cidr: "::/0" }
- { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" } - { protocol: udp, port: "{{ wireguard_port }}", ip: v4, cidr: 0.0.0.0/0 }
- { protocol: udp, port: "{{ wireguard_port }}", ip: v6, cidr: "::/0" }
- name: Upload the startup script - name: Upload the startup script
vultr_startup_script: vultr.cloud.startup_script:
name: algo-startup name: algo-startup
script: | script: |
{{ lookup('template', 'files/cloud-init/base.sh') }} {{ lookup('template', 'files/cloud-init/base.yml') }}
mkdir -p /var/lib/cloud/data/ || true
touch /var/lib/cloud/data/result.json
- name: Creating a server - name: Creating a server
vultr_server: vultr.cloud.instance:
name: "{{ algo_server_name }}" name: "{{ algo_server_name }}"
startup_script: algo-startup startup_script: algo-startup
hostname: "{{ algo_server_name }}" hostname: "{{ algo_server_name }}"
os: "{{ cloud_providers.vultr.os }}" os: "{{ cloud_providers.vultr.os }}"
plan: "{{ cloud_providers.vultr.size }}" plan: "{{ cloud_providers.vultr.size }}"
region: "{{ algo_vultr_region }}" region: "{{ algo_vultr_region }}"
firewall_group: "{{ algo_server_name }}" firewall_group: "{{ algo_server_name }}"
state: started state: started
tag: Environment:Algo tags:
ipv6_enabled: true - Environment:Algo
auto_backup_enabled: false enable_ipv6: true
notify_activate: false backups: false
register: vultr_server activation_email: false
register: vultr_server
- set_fact: - set_fact:
cloud_instance_ip: "{{ vultr_server.vultr_server.v4_main_ip }}" cloud_instance_ip: "{{ vultr_server.vultr_instance.main_ip }}"
ansible_ssh_user: algo ansible_ssh_user: algo
ansible_ssh_port: "{{ ssh_port }}" ansible_ssh_port: "{{ ssh_port }}"
cloudinit: true cloudinit: true
environment: environment:
VULTR_API_CONFIG: "{{ algo_vultr_config }}" VULTR_API_KEY: "{{ lookup('ini', 'key', section='default', file=algo_vultr_config) }}"
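With the vultr.cloud collection the API key is read straight out of the ini file via the ini lookup (and item.cidr.split('/') turns 0.0.0.0/0 into the subnet/subnet_size pair the new firewall module expects). The file the lookup parses is a plain two-line ini; a minimal example with a placeholder key, at whatever path was supplied as algo_vultr_config:

[default]
key = <your Vultr API key>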

@ -42,7 +42,7 @@
What region should the server be located in? What region should the server be located in?
(https://www.vultr.com/locations/): (https://www.vultr.com/locations/):
{% for r in vultr_regions %} {% for r in vultr_regions %}
{{ loop.index }}. {{ r['name'] }} {{ loop.index }}. {{ r['name'] }} ({{ r['regioncode'] | lower }})
{% endfor %} {% endfor %}
Enter the number of your desired region Enter the number of your desired region
@ -54,5 +54,5 @@
set_fact: set_fact:
algo_vultr_region: >- algo_vultr_region: >-
{% if region is defined %}{{ region }} {% if region is defined %}{{ region }}
{%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['name'] }} {%- elif _algo_region.user_input %}{{ vultr_regions[_algo_region.user_input | int -1 ]['regioncode'] | lower }}
{%- else %}{{ vultr_regions[default_region | int - 1]['name'] }}{% endif %} {%- else %}{{ vultr_regions[default_region | int - 1]['regioncode'] | lower }}{% endif %}

@ -1,3 +1,4 @@
---
- name: restart rsyslog - name: restart rsyslog
service: name=rsyslog state=restarted service: name=rsyslog state=restarted
@ -13,6 +14,11 @@
state: restarted state: restarted
daemon_reload: true daemon_reload: true
- name: restart systemd-resolved
systemd:
name: systemd-resolved
state: restarted
- name: restart loopback bsd - name: restart loopback bsd
shell: > shell: >
ifconfig lo100 destroy || true && ifconfig lo100 destroy || true &&

@ -13,13 +13,12 @@
- name: Gather facts - name: Gather facts
setup: setup:
- name: Gather additional facts - name: Gather additional facts
import_tasks: facts.yml import_tasks: facts.yml
- name: Set OS specific facts - name: Set OS specific facts
set_fact: set_fact:
config_prefix: "/usr/local/" config_prefix: /usr/local/
strongswan_shell: /usr/sbin/nologin strongswan_shell: /usr/sbin/nologin
strongswan_home: /var/empty strongswan_home: /var/empty
root_group: wheel root_group: wheel
@ -50,7 +49,7 @@
- name: Loopback included into the rc config - name: Loopback included into the rc config
blockinfile: blockinfile:
dest: /etc/rc.conf dest: /etc/rc.conf
create: yes create: true
block: | block: |
cloned_interfaces="lo100" cloned_interfaces="lo100"
ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255" ifconfig_lo100="inet {{ local_service_ip }} netmask 255.255.255.255"

@ -1,5 +1,4 @@
--- ---
- name: Iptables configured - name: Iptables configured
template: template:
src: "{{ item.src }}" src: "{{ item.src }}"

@ -6,6 +6,9 @@
tags: tags:
- update-users - update-users
- fail:
when: cloud_test|default(false)|bool
- include_tasks: ubuntu.yml - include_tasks: ubuntu.yml
when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout' when: '"Ubuntu" in OS.stdout or "Linux" in OS.stdout'
tags: tags:

@ -1,7 +1,6 @@
--- ---
- name: Gather facts - name: Gather facts
setup: setup:
- name: Cloud only tasks - name: Cloud only tasks
block: block:
- name: Install software updates - name: Install software updates
@ -36,14 +35,23 @@
become: false become: false
when: algo_provider != "local" when: algo_provider != "local"
- name: Include unatteded upgrades configuration - name: Include unattended upgrades configuration
import_tasks: unattended-upgrades.yml import_tasks: unattended-upgrades.yml
- name: Disable MOTD on login and SSHD - name: Disable MOTD on login and SSHD
replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}" replace: dest="{{ item.file }}" regexp="{{ item.regexp }}" replace="{{ item.line }}"
with_items: with_items:
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/login' } - { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/login }
- { regexp: '^session.*optional.*pam_motd.so.*', line: '# MOTD DISABLED', file: '/etc/pam.d/sshd' } - { regexp: ^session.*optional.*pam_motd.so.*, line: "# MOTD DISABLED", file: /etc/pam.d/sshd }
- name: Ensure fallback resolvers are set
ini_file:
path: /etc/systemd/resolved.conf
section: Resolve
option: FallbackDNS
value: "{{ dns_servers.ipv4 | join(' ') }}"
notify:
- restart systemd-resolved
- name: Loopback for services configured - name: Loopback for services configured
template: template:
@ -66,7 +74,7 @@
- name: Check apparmor support - name: Check apparmor support
command: apparmor_status command: apparmor_status
ignore_errors: yes ignore_errors: true
changed_when: false changed_when: false
register: apparmor_status register: apparmor_status
@ -90,6 +98,7 @@
- cgroup-tools - cgroup-tools
- openssl - openssl
- gnupg2 - gnupg2
- cron
sysctl: sysctl:
- item: net.ipv4.ip_forward - item: net.ipv4.ip_forward
value: 1 value: 1
@ -108,9 +117,9 @@
apt: apt:
name: name:
- linux-headers-generic - linux-headers-generic
- "linux-headers-{{ ansible_kernel }}" - linux-headers-{{ ansible_kernel }}
state: present state: present
when: install_headers when: install_headers | bool
- name: Configure the alternative ingress ip - name: Configure the alternative ingress ip
include_tasks: aip/main.yml include_tasks: aip/main.yml
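The new ini_file task above seeds systemd-resolved's fallback servers from dns_servers.ipv4 and restarts the daemon through the new handler. As an example, with dns_servers.ipv4: [1.1.1.1, 1.0.0.1] (placeholder values, not necessarily the shipped defaults) the managed option in /etc/systemd/resolved.conf would read roughly:

[Resolve]
FallbackDNS=1.1.1.1 1.0.0.1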

@ -95,7 +95,7 @@ COMMIT
-A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# Drop SMB/CIFS traffic that requests to be forwarded # Drop SMB/CIFS traffic that requests to be forwarded
-A FORWARD -p tcp --dport 445 -j {{ "DROP" if block_smb else "ACCEPT" }} -A FORWARD -p tcp --dport 445 -j {{ "DROP" if block_smb else "ACCEPT" }}
# Drop NETBIOS trafic that requests to be forwarded # Drop NETBIOS traffic that requests to be forwarded
-A FORWARD -p udp -m multiport --ports 137,138 -j {{ "DROP" if block_netbios else "ACCEPT" }} -A FORWARD -p udp -m multiport --ports 137,138 -j {{ "DROP" if block_netbios else "ACCEPT" }}
-A FORWARD -p tcp -m multiport --ports 137,139 -j {{ "DROP" if block_netbios else "ACCEPT" }} -A FORWARD -p tcp -m multiport --ports 137,139 -j {{ "DROP" if block_netbios else "ACCEPT" }}

@ -14,7 +14,7 @@
/etc/dnscrypt-proxy/** r, /etc/dnscrypt-proxy/** r,
/usr/bin/dnscrypt-proxy mr, /usr/bin/dnscrypt-proxy mr,
/tmp/public-resolvers.md* rw, /var/cache/{private/,}dnscrypt-proxy/** rw,
/tmp/*.tmp w, /tmp/*.tmp w,
owner /tmp/*.tmp r, owner /tmp/*.tmp r,

@ -6,4 +6,4 @@
- name: Enable mac_portacl - name: Enable mac_portacl
lineinfile: lineinfile:
path: /etc/rc.conf path: /etc/rc.conf
line: 'dnscrypt_proxy_mac_portacl_enable="YES"' line: dnscrypt_proxy_mac_portacl_enable="YES"

@ -1,22 +1,22 @@
--- ---
- block: - block:
- name: Add the repository - name: Add the repository
apt_repository: apt_repository:
state: present state: present
codename: "{{ ansible_distribution_release }}" codename: "{{ ansible_distribution_release }}"
repo: ppa:shevchuk/dnscrypt-proxy repo: ppa:shevchuk/dnscrypt-proxy
register: result register: result
until: result is succeeded until: result is succeeded
retries: 10 retries: 10
delay: 3 delay: 3
- name: Configure unattended-upgrades - name: Configure unattended-upgrades
copy: copy:
src: 50-dnscrypt-proxy-unattended-upgrades src: 50-dnscrypt-proxy-unattended-upgrades
dest: /etc/apt/apt.conf.d/50-dnscrypt-proxy-unattended-upgrades dest: /etc/apt/apt.conf.d/50-dnscrypt-proxy-unattended-upgrades
owner: root owner: root
group: root group: root
mode: 0644 mode: 0644
when: ansible_facts['distribution_version'] is version('20.04', '<') when: ansible_facts['distribution_version'] is version('20.04', '<')
- name: Install dnscrypt-proxy - name: Install dnscrypt-proxy
@ -26,18 +26,18 @@
update_cache: true update_cache: true
- block: - block:
- name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy - name: Ubuntu | Configure AppArmor policy for dnscrypt-proxy
copy: copy:
src: apparmor.profile.dnscrypt-proxy src: apparmor.profile.dnscrypt-proxy
dest: /etc/apparmor.d/usr.bin.dnscrypt-proxy dest: /etc/apparmor.d/usr.bin.dnscrypt-proxy
owner: root owner: root
group: root group: root
mode: 0600 mode: 0600
notify: restart dnscrypt-proxy notify: restart dnscrypt-proxy
- name: Ubuntu | Enforce the dnscrypt-proxy AppArmor policy - name: Ubuntu | Enforce the dnscrypt-proxy AppArmor policy
command: aa-enforce usr.bin.dnscrypt-proxy command: aa-enforce usr.bin.dnscrypt-proxy
changed_when: false changed_when: false
tags: apparmor tags: apparmor
when: apparmor_enabled|default(false)|bool when: apparmor_enabled|default(false)|bool
@ -60,4 +60,4 @@
[Service] [Service]
AmbientCapabilities=CAP_NET_BIND_SERVICE AmbientCapabilities=CAP_NET_BIND_SERVICE
notify: notify:
- restart dnscrypt-proxy - restart dnscrypt-proxy

@ -118,11 +118,12 @@ timeout = 2500
keepalive = 30 keepalive = 30
## Use the REFUSED return code for blocked responses ## Response for blocked queries. Options are `refused`, `hinfo` (default) or
## Setting this to `false` means that some responses will be lies. ## an IP response. To give an IP response, use the format `a:<IPv4>,aaaa:<IPv6>`.
## Unfortunately, `false` appears to be required for Android 8+ ## Using the `hinfo` option means that some responses will be lies.
## Unfortunately, the `hinfo` option appears to be required for Android 8+
refused_code_in_responses = false # blocked_query_response = 'refused'
## Load-balancing strategy: 'p2' (default), 'ph', 'first' or 'random' ## Load-balancing strategy: 'p2' (default), 'ph', 'first' or 'random'
@ -523,7 +524,7 @@ cache_neg_max_ttl = 600
[sources.'public-resolvers'] [sources.'public-resolvers']
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md'] urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md']
cache_file = '/tmp/public-resolvers.md' cache_file = '/var/cache/dnscrypt-proxy/public-resolvers.md'
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3' minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
prefix = '' prefix = ''
@ -550,5 +551,10 @@ cache_neg_max_ttl = 600
[static] [static]
{% if custom_server_stamps %}{% for name, stamp in custom_server_stamps.items() %}
[static.'{{ name }}']
stamp = '{{ stamp }}'
{%- endfor %}{% endif %}
# [static.'myserver'] # [static.'myserver']
# stamp = 'sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg' # stamp = 'sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg'
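The new custom_server_stamps loop renders one [static.'<name>'] block per entry, so custom resolvers can be pinned from the Algo configuration. A hypothetical entry, reusing the sample stamp from the commented block above (it is not a usable resolver):

custom_server_stamps:
  myserver: 'sdns:AQcAAAAAAAAAAAAQMi5kbnNjcnlwdC1jZXJ0Lg'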

@ -1,4 +1,16 @@
--- ---
- pause:
prompt: "{{ item }}"
when: not tests|default(false)|bool
tags:
- skip_ansible_lint
with_items: |
https://trailofbits.github.io/algo/deploy-to-ubuntu.html
Local installation might break your server. Use at your own risk.
Proceed? Press ENTER to continue or CTRL+C and A to abort...
- pause: - pause:
prompt: | prompt: |
Enter the IP address of your server: (or use localhost for local installation): Enter the IP address of your server: (or use localhost for local installation):
@ -8,25 +20,25 @@
- name: Set the facts - name: Set the facts
set_fact: set_fact:
cloud_instance_ip: >- cloud_instance_ip: >-
{% if server is defined %}{{ server }} {% if server is defined %}{{ server }}
{%- elif _algo_server.user_input %}{{ _algo_server.user_input }} {%- elif _algo_server.user_input %}{{ _algo_server.user_input }}
{%- else %}localhost{% endif %} {%- else %}localhost{% endif %}
- block: - block:
- pause: - pause:
prompt: | prompt: |
What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost) What user should we use to login on the server? (note: passwordless login required, or ignore if you're deploying to localhost)
[root] [root]
register: _algo_ssh_user register: _algo_ssh_user
when: ssh_user is undefined when: ssh_user is undefined
- name: Set the facts - name: Set the facts
set_fact: set_fact:
ansible_ssh_user: >- ansible_ssh_user: >-
{% if ssh_user is defined %}{{ ssh_user }} {% if ssh_user is defined %}{{ ssh_user }}
{%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }} {%- elif _algo_ssh_user.user_input %}{{ _algo_ssh_user.user_input }}
{%- else %}root{% endif %} {%- else %}root{% endif %}
when: cloud_instance_ip != "localhost" when: cloud_instance_ip != "localhost"
- pause: - pause:

@ -1,2 +1,2 @@
--- ---
ssh_tunnels_config_path: "configs/{{ IP_subject_alt_name }}/ssh-tunnel/" ssh_tunnels_config_path: configs/{{ IP_subject_alt_name }}/ssh-tunnel/

@ -1,2 +1,3 @@
---
- name: restart ssh - name: restart ssh
service: name="{{ ssh_service_name|default('ssh') }}" state=restarted service: name="{{ ssh_service_name|default('ssh') }}" state=restarted

@ -2,7 +2,7 @@
- name: Ensure that the sshd_config file has desired options - name: Ensure that the sshd_config file has desired options
blockinfile: blockinfile:
dest: /etc/ssh/sshd_config dest: /etc/ssh/sshd_config
marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role' marker: "# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role"
block: | block: |
Match Group algo Match Group algo
AllowTcpForwarding local AllowTcpForwarding local
@ -28,90 +28,90 @@
group: "{{ root_group|default('root') }}" group: "{{ root_group|default('root') }}"
- block: - block:
- name: Ensure that the SSH users exist - name: Ensure that the SSH users exist
user: user:
name: "{{ item }}" name: "{{ item }}"
group: algo group: algo
home: '/var/jail/{{ item }}' home: /var/jail/{{ item }}
createhome: yes createhome: true
generate_ssh_key: false generate_ssh_key: false
shell: /bin/false shell: /bin/false
state: present state: present
append: yes append: true
with_items: "{{ users }}" with_items: "{{ users }}"
- block: - block:
- name: Clean up the ssh-tunnel directory - name: Clean up the ssh-tunnel directory
file: file:
dest: "{{ ssh_tunnels_config_path }}" dest: "{{ ssh_tunnels_config_path }}"
state: absent state: absent
when: keys_clean_all|bool when: keys_clean_all|bool
- name: Ensure the config directories exist - name: Ensure the config directories exist
file: file:
dest: "{{ ssh_tunnels_config_path }}" dest: "{{ ssh_tunnels_config_path }}"
state: directory state: directory
recurse: yes recurse: true
mode: '0700' mode: "0700"
- name: Check if the private keys exist - name: Check if the private keys exist
stat: stat:
path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem" path: "{{ ssh_tunnels_config_path }}/{{ item }}.pem"
register: privatekey register: privatekey
with_items: "{{ users }}" with_items: "{{ users }}"
- name: Build ssh private keys - name: Build ssh private keys
openssl_privatekey: openssl_privatekey:
path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem" path: "{{ ssh_tunnels_config_path }}/{{ item.item }}.pem"
passphrase: "{{ p12_export_password }}" passphrase: "{{ p12_export_password }}"
cipher: aes256 cipher: auto
force: false force: false
no_log: true no_log: "{{ no_log|bool }}"
when: not item.stat.exists when: not item.stat.exists
with_items: "{{ privatekey.results }}" with_items: "{{ privatekey.results }}"
register: openssl_privatekey register: openssl_privatekey
- name: Build ssh public keys - name: Build ssh public keys
openssl_publickey: openssl_publickey:
path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub" path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pub"
privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem" privatekey_path: "{{ ssh_tunnels_config_path }}/{{ item.item.item }}.pem"
privatekey_passphrase: "{{ p12_export_password }}" privatekey_passphrase: "{{ p12_export_password }}"
format: OpenSSH format: OpenSSH
force: true force: true
no_log: true no_log: "{{ no_log|bool }}"
when: item.changed when: item.changed
with_items: "{{ openssl_privatekey.results }}" with_items: "{{ openssl_privatekey.results }}"
- name: Build the client ssh config - name: Build the client ssh config
template: template:
src: ssh_config.j2 src: ssh_config.j2
dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config" dest: "{{ ssh_tunnels_config_path }}/{{ item }}.ssh_config"
mode: 0700 mode: 0700
with_items: "{{ users }}" with_items: "{{ users }}"
delegate_to: localhost delegate_to: localhost
become: false become: false
- name: The authorized keys file created - name: The authorized keys file created
authorized_key: authorized_key:
user: "{{ item }}" user: "{{ item }}"
key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}" key: "{{ lookup('file', ssh_tunnels_config_path + '/' + item + '.pub') }}"
state: present state: present
manage_dir: true manage_dir: true
exclusive: true exclusive: true
with_items: "{{ users }}" with_items: "{{ users }}"
- name: Get active users - name: Get active users
getent: getent:
database: group database: group
key: algo key: algo
split: ':' split: ":"
- name: Delete non-existing users - name: Delete non-existing users
user: user:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
remove: yes remove: true
force: yes force: true
when: item not in users when: item not in users
with_items: "{{ getent_group['algo'][2].split(',') }}" with_items: "{{ getent_group['algo'][2].split(',') }}"
tags: update-users tags: update-users

@ -2,6 +2,7 @@ Host algo
DynamicForward 127.0.0.1:1080 DynamicForward 127.0.0.1:1080
LogLevel quiet LogLevel quiet
Compression yes Compression yes
IdentitiesOnly yes
IdentityFile {{ item }}.ssh.pem IdentityFile {{ item }}.ssh.pem
User {{ item }} User {{ item }}
Hostname {{ IP_subject_alt_name }} Hostname {{ IP_subject_alt_name }}

@ -1,5 +1,5 @@
--- ---
ipsec_config_path: "configs/{{ IP_subject_alt_name }}/ipsec/" ipsec_config_path: configs/{{ IP_subject_alt_name }}/ipsec/
ipsec_pki_path: "{{ ipsec_config_path }}/.pki/" ipsec_pki_path: "{{ ipsec_config_path }}/.pki/"
strongswan_shell: /usr/sbin/nologin strongswan_shell: /usr/sbin/nologin
strongswan_home: /var/lib/strongswan strongswan_home: /var/lib/strongswan
@ -7,7 +7,7 @@ strongswan_service: "{{ 'strongswan-starter' if ansible_facts['distribution_vers
BetweenClients_DROP: true BetweenClients_DROP: true
algo_ondemand_cellular: false algo_ondemand_cellular: false
algo_ondemand_wifi: false algo_ondemand_wifi: false
algo_ondemand_wifi_exclude: '_null' algo_ondemand_wifi_exclude: _null
algo_dns_adblocking: false algo_dns_adblocking: false
ipv6_support: false ipv6_support: false
dns_encryption: true dns_encryption: true
@ -16,7 +16,7 @@ subjectAltName_type: "{{ 'DNS' if IP_subject_alt_name|regex_search('[a-z]') else
subjectAltName: >- subjectAltName: >-
{{ subjectAltName_type }}:{{ IP_subject_alt_name }} {{ subjectAltName_type }}:{{ IP_subject_alt_name }}
{%- if ipv6_support -%},IP:{{ ansible_default_ipv6['address'] }}{%- endif -%} {%- if ipv6_support -%},IP:{{ ansible_default_ipv6['address'] }}{%- endif -%}
subjectAltName_USER: "email:{{ item }}@{{ openssl_constraint_random_id }}" subjectAltName_USER: email:{{ item }}@{{ openssl_constraint_random_id }}
nameConstraints: >- nameConstraints: >-
critical,permitted;{{ subjectAltName_type }}:{{ IP_subject_alt_name }}{{- '/255.255.255.255' if subjectAltName_type == 'IP' else '' -}} critical,permitted;{{ subjectAltName_type }}:{{ IP_subject_alt_name }}{{- '/255.255.255.255' if subjectAltName_type == 'IP' else '' -}}
{%- if subjectAltName_type == 'IP' -%} {%- if subjectAltName_type == 'IP' -%}
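subjectAltName_type falls back to IP unless IP_subject_alt_name contains a letter, so the same defaults cover both hostnames and bare addresses. Two worked examples (both values made up):

# IP_subject_alt_name: 203.0.113.10    ->  subjectAltName: IP:203.0.113.10
# IP_subject_alt_name: vpn.example.com ->  subjectAltName: DNS:vpn.example.com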

@ -1,3 +1,4 @@
---
- name: restart strongswan - name: restart strongswan
service: name={{ strongswan_service }} state=restarted service: name={{ strongswan_service }} state=restarted

@ -4,7 +4,7 @@
set -o pipefail set -o pipefail
cat private/{{ item }}.p12 | cat private/{{ item }}.p12 |
base64 base64
register: PayloadContent register: PayloadContent
changed_when: false changed_when: false
args: args:
executable: bash executable: bash
@ -23,7 +23,7 @@
with_together: with_together:
- "{{ users }}" - "{{ users }}"
- "{{ PayloadContent.results }}" - "{{ PayloadContent.results }}"
no_log: True no_log: "{{ no_log|bool }}"
- name: Build the client ipsec config file - name: Build the client ipsec config file
template: template:
