Implementing blocks and additional fail hints #487 (#497)

change the troubleshooting url
pull/502/merge
Jack Ivanov 7 years ago committed by Dan Guido
parent 2f5c050fd2
commit bd348af9c2
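
The change applies one pattern throughout: each group of tasks is wrapped in a block, and a rescue section prints the fail_hint list (added to config.cfg in the first hunk below) before failing the play. A minimal sketch of that pattern, assuming a standalone play for illustration (the play header and the failing task are placeholders; only the rescue tasks and the fail_hint values come from this commit):

- hosts: localhost
  gather_facts: false
  vars:
    fail_hint:
      - Sorry, but something went wrong!
      - Please check the troubleshooting guide.
      - https://trailofbits.github.io/algo/troubleshooting.html
  tasks:
    - block:
        - name: A task that might fail   # illustrative placeholder
          command: /bin/false
      rescue:
        - debug: var=fail_hint           # surface the hint on any failure
          tags: always
        - fail:                          # then stop the play
          tags: always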

@@ -78,3 +78,8 @@ cloud_providers:
    size: f1-micro
    image: ubuntu-1604 # ubuntu-1604 / ubuntu-1704
  local:

fail_hint:
  - Sorry, but something went wrong!
  - Please check the troubleshooting guide.
  - https://trailofbits.github.io/algo/troubleshooting.html

@@ -5,15 +5,21 @@
    - config.cfg

  pre_tasks:
    - block:
        - name: Local pre-tasks
          include: playbooks/local.yml
          tags: [ 'always' ]

        - name: Local pre-tasks
          include: playbooks/local_ssh.yml
          become: false
          when: Deployed_By_Algo is defined and Deployed_By_Algo == "Y"
          tags: [ 'local' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: cloud-digitalocean, tags: ['digitalocean'] }
@@ -23,10 +29,16 @@
    - { role: local, tags: ['local'] }

  post_tasks:
    - block:
        - name: Local post-tasks
          include: playbooks/post.yml
          become: false
          tags: [ 'cloud' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

- name: Configure the server and install required software
  hosts: vpn-host
@@ -37,9 +49,15 @@
    - config.cfg

  pre_tasks:
    - block:
        - name: Common pre-tasks
          include: playbooks/common.yml
          tags: [ 'digitalocean', 'ec2', 'gce', 'azure', 'local', 'pre' ]
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: security, tags: [ 'security' ] }
@@ -48,25 +66,31 @@
    - { role: vpn, tags: [ 'vpn' ] }

  post_tasks:
    - block:
        - debug:
            msg:
              - "{{ congrats.common.split('\n') }}"
              - " {{ congrats.p12_pass }}"
              - " {% if Store_CAKEY is defined and Store_CAKEY == 'N' %}{% else %}{{ congrats.ca_key_pass }}{% endif %}"
              - " {% if cloud_deployment is defined %}{{ congrats.ssh_access }}{% endif %}"
          tags: always

        - name: Save the CA key password
          local_action: >
            shell echo "{{ easyrsa_CA_password }}" > /tmp/ca_password
          become: no
          tags: tests

        - name: Delete the CA key
          local_action:
            module: file
            path: "configs/{{ IP_subject_alt_name }}/pki/private/cakey.pem"
            state: absent
          become: no
          tags: always
          when: Store_CAKEY is defined and Store_CAKEY == "N"
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

@@ -1,138 +1,143 @@
---
- block:
    - set_fact:
        resource_group: "Algo_{{ region }}"
        secret: "{{ azure_secret | default(lookup('env','AZURE_SECRET'), true) }}"
        tenant: "{{ azure_tenant | default(lookup('env','AZURE_TENANT'), true) }}"
        client_id: "{{ azure_client_id | default(lookup('env','AZURE_CLIENT_ID'), true) }}"
        subscription_id: "{{ azure_subscription_id | default(lookup('env','AZURE_SUBSCRIPTION_ID'), true) }}"

    - name: Create a resource group
      azure_rm_resourcegroup:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        name: "{{ resource_group }}"
        location: "{{ region }}"
        tags:
          Environment: Algo

    - name: Create a virtual network
      azure_rm_virtualnetwork:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        name: algo_net
        address_prefixes: "10.10.0.0/16"
        tags:
          Environment: Algo

    - name: Create a security group
      azure_rm_securitygroup:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        name: AlgoSecGroup
        purge_rules: yes
        rules:
          - name: AllowSSH
            protocol: Tcp
            destination_port_range: 22
            access: Allow
            priority: 100
            direction: Inbound
          - name: AllowIPSEC500
            protocol: Udp
            destination_port_range: 500
            access: Allow
            priority: 110
            direction: Inbound
          - name: AllowIPSEC4500
            protocol: Udp
            destination_port_range: 4500
            access: Allow
            priority: 120
            direction: Inbound

    - name: Create a subnet
      azure_rm_subnet:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        name: algo_subnet
        address_prefix: "10.10.0.0/24"
        virtual_network: algo_net
        security_group_name: AlgoSecGroup
        tags:
          Environment: Algo

    - name: Create an instance
      azure_rm_virtualmachine:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        resource_group: "{{ resource_group }}"
        admin_username: ubuntu
        virtual_network: algo_net
        name: "{{ azure_server_name }}"
        ssh_password_enabled: false
        vm_size: "{{ cloud_providers.azure.size }}"
        tags:
          Environment: Algo
        ssh_public_keys:
          - { path: "/home/ubuntu/.ssh/authorized_keys", key_data: "{{ lookup('file', '{{ SSH_keys.public }}') }}" }
        image: "{{ cloud_providers.azure.image }}"
      register: azure_rm_virtualmachine
      # To-do: Add error handling - if vm_size requested is not available, can we fall back to another, ideally with a prompt?

    - set_fact:
        ip_address: "{{ azure_rm_virtualmachine.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.publicIPAddress.properties.ipAddress }}"
        networkinterface_name: "{{ azure_rm_virtualmachine.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].name }}"

    - name: Ensure the network interface includes all required parameters
      azure_rm_networkinterface:
        secret: "{{ secret }}"
        tenant: "{{ tenant }}"
        client_id: "{{ client_id }}"
        subscription_id: "{{ subscription_id }}"
        name: "{{ networkinterface_name }}"
        resource_group: "{{ resource_group }}"
        virtual_network_name: algo_net
        subnet_name: algo_subnet
        security_group_name: AlgoSecGroup

    - name: Add the instance to an inventory group
      add_host:
        name: "{{ ip_address }}"
        groups: vpn-host
        ansible_ssh_user: ubuntu
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: azure
        ipv6_support: no

    - set_fact:
        cloud_instance_ip: "{{ ip_address }}"

    - name: Ensure the group azure exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[azure]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[azure\]'
        regexp: "^{{ cloud_instance_ip }}.*"
        line: "{{ cloud_instance_ip }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,102 +1,108 @@
- block:
    - name: Set the DigitalOcean Access Token fact
      set_fact:
        do_token: "{{ do_access_token | default(lookup('env','DO_API_TOKEN'), true) }}"
        public_key: "{{ lookup('file', '{{ SSH_keys.public }}') }}"

    - block:
        - name: "Delete the existing Algo SSH keys"
          digital_ocean:
            state: absent
            command: ssh
            api_token: "{{ do_token }}"
            name: "{{ SSH_keys.comment }}"
          register: ssh_keys
          until: ssh_keys.changed != true
          retries: 10
          delay: 1
      rescue:
        - name: Collect the fail error
          digital_ocean:
            state: absent
            command: ssh
            api_token: "{{ do_token }}"
            name: "{{ SSH_keys.comment }}"
          register: ssh_keys
          ignore_errors: yes

        - debug: var=ssh_keys

        - fail:
            msg: "Please, ensure that your API token is not read-only."

    - name: "Upload the SSH key"
      digital_ocean:
        state: present
        command: ssh
        ssh_pub_key: "{{ public_key }}"
        api_token: "{{ do_token }}"
        name: "{{ SSH_keys.comment }}"
      register: do_ssh_key

    - name: "Creating a droplet..."
      digital_ocean:
        state: present
        command: droplet
        name: "{{ do_server_name }}"
        region_id: "{{ do_region }}"
        size_id: "{{ cloud_providers.digitalocean.size }}"
        image_id: "{{ cloud_providers.digitalocean.image }}"
        ssh_key_ids: "{{ do_ssh_key.ssh_key.id }}"
        unique_name: yes
        api_token: "{{ do_token }}"
        ipv6: yes
      register: do

    - name: Add the droplet to an inventory group
      add_host:
        name: "{{ do.droplet.ip_address }}"
        groups: vpn-host
        ansible_ssh_user: root
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        do_access_token: "{{ do_token }}"
        do_droplet_id: "{{ do.droplet.id }}"
        cloud_provider: digitalocean
        ipv6_support: true

    - set_fact:
        cloud_instance_ip: "{{ do.droplet.ip_address }}"

    - name: Tag the droplet
      digital_ocean_tag:
        name: "Environment:Algo"
        resource_id: "{{ do.droplet.id }}"
        api_token: "{{ do_token }}"
        state: present

    - name: Get droplets
      uri:
        url: "https://api.digitalocean.com/v2/droplets?tag_name=Environment:Algo"
        method: GET
        status_code: 200
        headers:
          Content-Type: "application/json"
          Authorization: "Bearer {{ do_token }}"
      register: do_droplets

    - name: Ensure the group digitalocean exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[digitalocean]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[digitalocean\]'
        regexp: "^{{ item.networks.v4[0].ip_address }}.*"
        line: "{{ item.networks.v4[0].ip_address }}"
      with_items:
        - "{{ do_droplets.json.droplets }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,63 +1,69 @@
- block:
    - set_fact:
        access_key: "{{ aws_access_key | default(lookup('env','AWS_ACCESS_KEY_ID'), true) }}"
        secret_key: "{{ aws_secret_key | default(lookup('env','AWS_SECRET_ACCESS_KEY'), true) }}"
        stack_name: "{{ aws_server_name | replace('.', '-') }}"

    - name: Locate official AMI for region
      ec2_ami_find:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        name: "ubuntu/images/hvm-ssd/{{ cloud_providers.ec2.image.name }}-amd64-server-*"
        owner: "{{ cloud_providers.ec2.image.owner }}"
        sort: creationDate
        sort_order: descending
        sort_end: 1
        region: "{{ region }}"
      register: ami_search

    - set_fact:
        ami_image: "{{ ami_search.results[0].ami_id }}"

    - include: encrypt_image.yml
      tags: [encrypted]

    - include: cloudformation.yml

    - name: Add new instance to host group
      add_host:
        hostname: "{{ stack.stack_outputs.PublicIP }}"
        groupname: vpn-host
        ansible_ssh_user: ubuntu
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: ec2
        ipv6_support: yes

    - set_fact:
        cloud_instance_ip: "{{ stack.stack_outputs.PublicIP }}"

    - name: Get EC2 instances
      ec2_remote_facts:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        region: "{{ region }}"
        filters:
          instance-state-name: running
          "tag:Environment": Algo
      register: algo_instances

    - name: Ensure the group ec2 exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[ec2]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[ec2\]'
        regexp: "^{{ item.public_ip_address }}.*"
        line: "{{ item.public_ip_address }}"
      with_items:
        - "{{ algo_instances.instances }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,64 +1,70 @@
- block:
    - set_fact:
        credentials_file_path: "{{ credentials_file | default(lookup('env','GCE_CREDENTIALS_FILE_PATH'), true) }}"
        ssh_public_key_lookup: "{{ lookup('file', '{{ SSH_keys.public }}') }}"

    - set_fact:
        credentials_file_lookup: "{{ lookup('file', '{{ credentials_file_path }}') }}"

    - set_fact:
        service_account_email: "{{ credentials_file_lookup.client_email | default(lookup('env','GCE_EMAIL')) }}"
        project_id: "{{ credentials_file_lookup.project_id | default(lookup('env','GCE_PROJECT')) }}"

    - name: "Creating a new instance..."
      gce:
        instance_names: "{{ server_name }}"
        zone: "{{ zone }}"
        machine_type: "{{ cloud_providers.gce.size }}"
        image: "{{ cloud_providers.gce.image }}"
        service_account_email: "{{ service_account_email }}"
        credentials_file: "{{ credentials_file_path }}"
        project_id: "{{ project_id }}"
        metadata: '{"ssh-keys":"ubuntu:{{ ssh_public_key_lookup }}"}'
        # ip_forward: true
        tags:
          - "environment-algo"
      register: google_vm

    - name: Add the instance to an inventory group
      add_host:
        name: "{{ google_vm.instance_data[0].public_ip }}"
        groups: vpn-host
        ansible_ssh_user: ubuntu
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_ssh_private_key_file: "{{ SSH_keys.private }}"
        cloud_provider: gce
        ipv6_support: no

    - name: Firewall configured
      local_action:
        module: gce_net
        name: "{{ google_vm.instance_data[0].network }}"
        fwname: "algo-ikev2"
        allowed: "udp:500,4500;tcp:22"
        state: "present"
        src_range: 0.0.0.0/0
        service_account_email: "{{ credentials_file_lookup.client_email }}"
        credentials_file: "{{ credentials_file }}"
        project_id: "{{ credentials_file_lookup.project_id }}"

    - set_fact:
        cloud_instance_ip: "{{ google_vm.instance_data[0].public_ip }}"

    - name: Ensure the group gce exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[gce]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[gce\]'
        regexp: "^{{ google_vm.instance_data[0].public_ip }}.*"
        line: "{{ google_vm.instance_data[0].public_ip }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,28 +1,28 @@
---
- block:
    - include: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

    - include: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

    - name: Install tools
      package: name="{{ item }}" state=present
      with_items:
        - "{{ tools|default([]) }}"
      tags:
        - always

    - name: Sysctl tuning
      sysctl: name="{{ item.item }}" value="{{ item.value }}"
      with_items:
        - "{{ sysctl|default([]) }}"
      tags:
        - always

    - meta: flush_handlers
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,41 +1,46 @@
---
- block:
    - name: Dnsmasq installed
      package: name=dnsmasq

    - name: Ensure that the dnsmasq user exist
      user: name=dnsmasq groups=nogroup append=yes state=present

    - name: The dnsmasq directory created
      file: dest=/var/lib/dnsmasq state=directory mode=0755 owner=dnsmasq group=nogroup

    - include: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

    - include: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

    - name: Dnsmasq configured
      template: src=dnsmasq.conf.j2 dest="{{ config_prefix|default('/') }}etc/dnsmasq.conf"
      notify:
        - restart dnsmasq

    - name: Adblock script created
      template: src=adblock.sh dest=/usr/local/sbin/adblock.sh owner=root group="{{ root_group|default('root') }}" mode=0755

    - name: Adblock script added to cron
      cron:
        name: Adblock hosts update
        minute: 10
        hour: 2
        job: /usr/local/sbin/adblock.sh
        user: dnsmasq

    - name: Update adblock hosts
      shell: >
        sudo -u dnsmasq "/usr/local/sbin/adblock.sh"

    - meta: flush_handlers

    - name: Dnsmasq enabled and started
      service: name=dnsmasq state=started enabled=yes
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,35 +1,42 @@
---
- block:
    - name: Add the instance to an inventory group
      add_host:
        name: "{{ server_ip }}"
        groups: vpn-host
        ansible_ssh_user: "{{ server_user }}"
        ansible_python_interpreter: "/usr/bin/python2.7"
        cloud_provider: local
      when: server_ip != "localhost"

    - name: Add the instance to an inventory group
      add_host:
        name: "{{ server_ip }}"
        groups: vpn-host
        ansible_ssh_user: "{{ server_user }}"
        ansible_python_interpreter: "/usr/bin/python2.7"
        ansible_connection: local
        cloud_provider: local
      when: server_ip == "localhost"

    - set_fact:
        cloud_instance_ip: "{{ server_ip }}"

    - name: Ensure the group local exists in the dynamic inventory file
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        line: '[local]'

    - name: Populate the dynamic inventory
      lineinfile:
        state: present
        dest: configs/inventory.dynamic
        insertafter: '\[local\]'
        regexp: "^{{ server_ip }}.*"
        line: "{{ server_ip }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,96 +1,101 @@
---
- block:
    - name: Install tools
      apt: name="{{ item }}" state=latest
      with_items:
        - unattended-upgrades

    - name: Configure unattended-upgrades
      template: src=50unattended-upgrades.j2 dest=/etc/apt/apt.conf.d/50unattended-upgrades owner=root group=root mode=0644

    - name: Periodic upgrades configured
      template: src=10periodic.j2 dest=/etc/apt/apt.conf.d/10periodic owner=root group=root mode=0644

    - name: Find directories for minimizing access
      stat:
        path: "{{ item }}"
      register: minimize_access_directories
      with_items:
        - '/usr/local/sbin'
        - '/usr/local/bin'
        - '/usr/sbin'
        - '/usr/bin'
        - '/sbin'
        - '/bin'

    - name: Minimize access
      file: path='{{ item.stat.path }}' mode='go-w' recurse=yes
      when: item.stat.isdir
      with_items: "{{ minimize_access_directories.results }}"
      no_log: True

    - name: Change shadow ownership to root and mode to 0600
      file: dest='/etc/shadow' owner=root group=root mode=0600

    - name: change su-binary to only be accessible to user and group root
      file: dest='/bin/su' owner=root group=root mode=0750

    - name: Collect Use of privileged commands
      shell: >
        /usr/bin/find {/usr/local/sbin,/usr/local/bin,/sbin,/bin,/usr/sbin,/usr/bin} -xdev \( -perm -4000 -o -perm -2000 \) -type f | awk '{print "-a always,exit -F path=" $1 " -F perm=x -F auid>=500 -F auid!=4294967295 -k privileged" }'
      args:
        executable: /bin/bash
      register: privileged_programs

    # Core dumps
    - name: Restrict core dumps (with PAM)
      lineinfile: dest=/etc/security/limits.conf line="* hard core 0" state=present

    - name: Restrict core dumps (with sysctl)
      sysctl: name=fs.suid_dumpable value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present

    # Kernel fixes
    - name: Disable Source Routed Packet Acceptance
      sysctl: name="{{item}}" value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.accept_source_route
        - net.ipv4.conf.default.accept_source_route
      notify:
        - flush routing cache

    - name: Disable ICMP Redirect Acceptance
      sysctl: name="{{item}}" value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.accept_redirects
        - net.ipv4.conf.default.accept_redirects

    - name: Disable Secure ICMP Redirect Acceptance
      sysctl: name="{{item}}" value=0 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.secure_redirects
        - net.ipv4.conf.default.secure_redirects
      notify:
        - flush routing cache

    - name: Enable Bad Error Message Protection
      sysctl: name=net.ipv4.icmp_ignore_bogus_error_responses value=1 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      notify:
        - flush routing cache

    - name: Enable RFC-recommended Source Route Validation
      sysctl: name="{{item}}" value=1 ignoreerrors=yes sysctl_set=yes reload=yes state=present
      with_items:
        - net.ipv4.conf.all.rp_filter
        - net.ipv4.conf.default.rp_filter
      notify:
        - flush routing cache

    - name: Do not send ICMP redirects (we are not a router)
      sysctl: name=net.ipv4.conf.all.send_redirects value=0

    - name: SSH config
      template: src=sshd_config.j2 dest=/etc/ssh/sshd_config owner=root group=root mode=0644
      notify:
        - restart ssh
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,77 +1,82 @@
---
- block:
    - name: Ensure that the sshd_config file has desired options
      blockinfile:
        dest: /etc/ssh/sshd_config
        marker: '# {mark} ANSIBLE MANAGED BLOCK ssh_tunneling_role'
        block: |
          Match Group algo
            AllowTcpForwarding local
            AllowAgentForwarding no
            AllowStreamLocalForwarding no
            PermitTunnel no
            X11Forwarding no
      notify:
        - restart ssh

    - name: Ensure that the algo group exist
      group: name=algo state=present

    - name: Ensure that the jail directory exist
      file: path=/var/jail/ state=directory mode=0755 owner=root group="{{ root_group|default('root') }}"

    - name: Ensure that the SSH users exist
      user:
        name: "{{ item }}"
        groups: algo
        home: '/var/jail/{{ item }}'
        createhome: yes
        generate_ssh_key: yes
        shell: /bin/false
        ssh_key_type: ecdsa
        ssh_key_bits: 256
        ssh_key_comment: '{{ item }}@{{ IP_subject_alt_name }}'
        ssh_key_passphrase: "{{ easyrsa_p12_export_password }}"
        state: present
        append: yes
      with_items: "{{ users }}"

    - name: The authorized keys file created
      file:
        src: '/var/jail/{{ item }}/.ssh/id_ecdsa.pub'
        dest: '/var/jail/{{ item }}/.ssh/authorized_keys'
        owner: "{{ item }}"
        group: "{{ item }}"
        state: link
      with_items: "{{ users }}"

    - name: Generate SSH fingerprints
      shell: >
        ssh-keyscan {{ IP_subject_alt_name }} 2>/dev/null
      register: ssh_fingerprints

    - name: Fetch users SSH private keys
      fetch: src='/var/jail/{{ item }}/.ssh/id_ecdsa' dest=configs/{{ IP_subject_alt_name }}/{{ item }}.ssh.pem flat=yes
      with_items: "{{ users }}"

    - name: Change mode for SSH private keys
      local_action: file path=configs/{{ IP_subject_alt_name }}/{{ item }}.ssh.pem mode=0600
      with_items: "{{ users }}"
      become: false

    - name: Fetch the known_hosts file
      local_action:
        module: template
        src: known_hosts.j2
        dest: configs/{{ IP_subject_alt_name }}/known_hosts
      become: no

    - name: Build the client ssh config
      local_action:
        module: template
        src: ssh_config.j2
        dest: configs/{{ IP_subject_alt_name }}/{{ item }}.ssh_config
        mode: 0600
      become: no
      with_items:
        - "{{ users }}"
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -1,31 +1,36 @@
---
- block:
    - name: Ensure that the strongswan group exist
      group: name=strongswan state=present

    - name: Ensure that the strongswan user exist
      user: name=strongswan group=strongswan state=present

    - include: ubuntu.yml
      when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'

    - include: freebsd.yml
      when: ansible_distribution == 'FreeBSD'

    - name: Install strongSwan
      package: name=strongswan state=present

    - name: Get StrongSwan versions
      shell: >
        ipsec --versioncode | grep -oE "^U([0-9]*|\.)*" | sed "s/^U\|\.//g"
      register: strongswan_version

    - include: ipec_configuration.yml
    - include: openssl.yml
    - include: distribute_keys.yml
    - include: client_configs.yml

    - meta: flush_handlers

    - name: strongSwan started
      service: name=strongswan state=started
  rescue:
    - debug: var=fail_hint
      tags: always
    - fail:
      tags: always

@@ -6,27 +6,33 @@
    - config.cfg

  tasks:
    - block:
        - name: Add the server to the vpn-host group
          add_host:
            hostname: "{{ server_ip }}"
            groupname: vpn-host
            ansible_ssh_user: "{{ server_user }}"
            ansible_python_interpreter: "/usr/bin/python2.7"
            ssh_tunneling_enabled: "{{ ssh_tunneling_enabled }}"
            easyrsa_CA_password: "{{ easyrsa_CA_password }}"
            IP_subject: "{{ IP_subject }}"
            ansible_ssh_private_key_file: "{{ SSH_keys.private }}"

        - name: Wait until SSH becomes ready...
          local_action:
            module: wait_for
            port: 22
            host: "{{ server_ip }}"
            search_regex: "OpenSSH"
            delay: 10
            timeout: 320
            state: present
          become: false
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

- name: User management
  hosts: vpn-host
@@ -37,171 +43,188 @@
    - roles/vpn/defaults/main.yml

  pre_tasks:
    - block:
        - name: Common pre-tasks
          include: playbooks/common.yml
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  roles:
    - { role: ssh_tunneling, tags: [ 'ssh_tunneling' ], when: ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y" }

  tasks:
    - block:
        - name: Gather Facts
          setup:

        - name: Checking the signature algorithm
          local_action: >
            shell openssl x509 -text -in certs/{{ IP_subject_alt_name }}.crt | grep 'Signature Algorithm' | head -n1
          become: no
          register: sig_algo
          args:
            chdir: "configs/{{ IP_subject_alt_name }}/pki/"

        - name: Change the algorithm to RSA
          set_fact:
            algo_params: "rsa:2048"
          when: '"ecdsa" not in sig_algo.stdout'

        - name: Build the client's pair
          local_action: >
            shell openssl req -utf8 -new -newkey {{ algo_params | default('ec:ecparams/prime256v1.pem') }} -config openssl.cnf -keyout private/{{ item }}.key -out reqs/{{ item }}.req -nodes -passin pass:"{{ easyrsa_CA_password }}" -subj "/CN={{ item }}" -batch &&
            openssl ca -utf8 -in reqs/{{ item }}.req -out certs/{{ item }}.crt -config openssl.cnf -days 3650 -batch -passin pass:"{{ easyrsa_CA_password }}" -subj "/CN={{ item }}" &&
            touch certs/{{ item }}_crt_generated
          become: no
          args:
            chdir: "configs/{{ IP_subject_alt_name }}/pki/"
            creates: certs/{{ item }}_crt_generated
          environment:
            subjectAltName: "DNS:{{ item }}"
          with_items: "{{ users }}"

        - name: Build the client's p12
          local_action: >
            shell openssl pkcs12 -in certs/{{ item }}.crt -inkey private/{{ item }}.key -export -name {{ item }} -out private/{{ item }}.p12 -certfile cacert.pem -passout pass:"{{ easyrsa_p12_export_password }}"
          become: no
          args:
            chdir: "configs/{{ IP_subject_alt_name }}/pki/"
          with_items: "{{ users }}"

        - name: Copy the p12 certificates
          local_action:
            module: copy
            src: "configs/{{ IP_subject_alt_name }}/pki/private/{{ item }}.p12"
            dest: "configs/{{ IP_subject_alt_name }}/{{ item }}.p12"
            mode: 0600
          become: no
          with_items:
            - "{{ users }}"

        - name: Get active users
          local_action: >
            shell grep ^V index.txt | grep -v "{{ IP_subject_alt_name }}" | awk '{print $5}' | sed 's/\/CN=//g'
          become: no
          args:
            chdir: "configs/{{ IP_subject_alt_name }}/pki/"
          register: valid_certs

        - name: Revoke non-existing users
          local_action: >
            shell openssl ca -config openssl.cnf -passin pass:"{{ easyrsa_CA_password }}" -revoke certs/{{ item }}.crt &&
            openssl ca -gencrl -config openssl.cnf -passin pass:"{{ easyrsa_CA_password }}" -revoke certs/{{ item }}.crt -out crl/{{ item }}.crt
            touch crl/{{ item }}_revoked
          become: no
          args:
            chdir: "configs/{{ IP_subject_alt_name }}/pki/"
            creates: crl/{{ item }}_revoked
          environment:
            subjectAltName: "DNS:{{ item }}"
          when: item not in users
          with_items: "{{ valid_certs.stdout_lines }}"

        - name: Copy the revoked certificates to the vpn server
          copy:
            src: configs/{{ IP_subject_alt_name }}/pki/crl/{{ item }}.crt
            dest: "{{ config_prefix|default('/') }}etc/ipsec.d/crls/{{ item }}.crt"
          when: item not in users
          with_items: "{{ valid_certs.stdout_lines }}"
          notify:
            - rereadcrls

        - name: Register p12 PayloadContent
          local_action: >
            shell cat private/{{ item }}.p12 | base64
          register: PayloadContent
          become: no
          args:
            chdir: "configs/{{ IP_subject_alt_name }}/pki/"
          with_items: "{{ users }}"

        - name: Set facts for mobileconfigs
          set_fact:
            proxy_enabled: false
            PayloadContentCA: "{{ lookup('file' , 'configs/{{ IP_subject_alt_name }}/pki/cacert.pem')|b64encode }}"

        - name: Build the mobileconfigs
          local_action:
            module: template
            src: roles/vpn/templates/mobileconfig.j2
            dest: configs/{{ IP_subject_alt_name }}/{{ item.0 }}.mobileconfig
            mode: 0600
          become: no
          with_together:
            - "{{ users }}"
            - "{{ PayloadContent.results }}"
          no_log: True

        - name: Build the client ipsec config file
          local_action:
            module: template
            src: roles/vpn/templates/client_ipsec.conf.j2
            dest: configs/{{ IP_subject_alt_name }}/ipsec_{{ item }}.conf
            mode: 0600
          become: no
          with_items:
            - "{{ users }}"

        - name: Build the client ipsec secret file
          local_action:
            module: template
            src: roles/vpn/templates/client_ipsec.secrets.j2
            dest: configs/{{ IP_subject_alt_name }}/ipsec_{{ item }}.secrets
            mode: 0600
          become: no
          with_items:
            - "{{ users }}"

        - name: Build the windows client powershell script
          local_action:
            module: template
            src: roles/vpn/templates/client_windows.ps1.j2
            dest: configs/{{ IP_subject_alt_name }}/windows_{{ item }}.ps1
            mode: 0600
          become: no
          when: Win10_Enabled is defined and Win10_Enabled == "Y"
          with_items: "{{ users }}"

        # SSH
        - name: SSH | Get active system users
          shell: >
            getent group algo | cut -f4 -d: | sed "s/,/\n/g"
          register: valid_users
          when: ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y"

        - name: SSH | Delete non-existing users
          user:
            name: "{{ item }}"
            state: absent
            remove: yes
            force: yes
          when: item not in users and ssh_tunneling_enabled is defined and ssh_tunneling_enabled == "y"
          with_items: "{{ valid_users.stdout_lines | default('null') }}"
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  post_tasks:
    - block:
        - debug:
            msg:
              - "{{ congrats.common.split('\n') }}"
              - " {{ congrats.p12_pass }}"
          tags: always
      rescue:
        - debug: var=fail_hint
          tags: always
        - fail:
          tags: always

  handlers:
    - name: rereadcrls
