From f6e459e0ead6c1f87430aabcdb5f71a11fe389c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sofus=20Albert=20H=C3=B8gsbro=20Rose?= Date: Mon, 21 Aug 2023 09:03:34 +0200 Subject: [PATCH] refactor!: Playbooks into reusable roles. Also solving several issues along the way. Progress on #18. Closes #15. Closes #13. Closes #10. Closes #16. --- .dockerignore | 35 ----- README.md | 60 ++++++-- ansible.cfg | 3 + inventory.yml | 99 +++++++------ playbook.yml | 70 +++++++++ playbooks/playbook.hosts.yml | 136 ------------------ playbooks/playbook.swarm.yml | 132 ----------------- playbooks/playbook.systems.yml | 10 -- playbooks/playbook.wg0.yml | 43 ------ playbooks/templates/99-wg0.netdev | 19 --- requirements.txt | 19 +++ roles/host/docker/tasks/main.yml | 27 ++++ roles/host/docker_swarm_leader/tasks/main.yml | 32 +++++ roles/host/docker_swarm_worker/tasks/main.yml | 23 +++ roles/host/network_wg0/handlers/main.yml | 5 + roles/host/network_wg0/tasks/main.yml | 67 +++++++++ .../host/network_wg0/templates/99-wg0.netdev | 19 +++ .../network_wg0}/templates/99-wg0.network | 0 roles/host/system_tools/tasks/main.yml | 4 + roles/setup/dns_foundation/defaults/main.yml | 1 + roles/setup/dns_foundation/tasks/main.yml | 55 +++++++ roles/setup/dns_foundation/vars/main.yml | 4 + roles/setup/hosts_digitalocean/README.md | 2 + .../hosts_digitalocean/defaults/main.yml | 11 ++ roles/setup/hosts_digitalocean/tasks/main.yml | 81 +++++++++++ roles/setup/hosts_digitalocean/vars/main.yml | 4 + roles/stack/deploy_configs/defaults/main.yml | 3 + roles/stack/deploy_configs/tasks/main.yml | 34 +++++ roles/stack/deploy_dns/tasks/main.yml | 28 ++++ roles/stack/deploy_dns/vars/main.yml | 4 + .../deploy_network_overlay/tasks/main.yml | 29 ++++ roles/stack/deploy_stack/defaults/main.yml | 1 + roles/stack/deploy_stack/handlers/main.yml | 5 + roles/stack/deploy_stack/tasks/main.yml | 18 +++ .../stack/deploy_volume_s3/defaults/main.yml | 65 +++++++++ roles/stack/deploy_volume_s3/tasks/main.yml | 64 +++++++++ 
.../deploy_volume_s3/templates/rclone.conf | 15 ++ .../deploy_volume_s3/templates/rclone.service | 16 +++ run.sh | 103 ++++--------- stacks/.gitignore | 6 - stacks/cleanup/playbook.yml | 37 ++--- .../configs/mesh__site-support__service.toml | 10 ++ stacks/mesh/configs/mesh__stack_chat.toml | 0 stacks/mesh/configs/mesh__stack_git.toml | 0 stacks/mesh/configs/mesh__traefik_static.toml | 12 +- stacks/mesh/docker-compose.yml | 14 +- stacks/mesh/playbook.yml | 135 +++-------------- stacks/site-support/SECURITY.md | 4 +- .../mesh__site-support__service.toml} | 2 +- stacks/site-support/docker-compose.yml | 28 ++-- stacks/site-support/playbook.yml | 87 ++++------- 51 files changed, 930 insertions(+), 751 deletions(-) delete mode 100644 .dockerignore create mode 100644 ansible.cfg create mode 100644 playbook.yml delete mode 100644 playbooks/playbook.hosts.yml delete mode 100644 playbooks/playbook.swarm.yml delete mode 100644 playbooks/playbook.systems.yml delete mode 100644 playbooks/playbook.wg0.yml delete mode 100644 playbooks/templates/99-wg0.netdev create mode 100644 requirements.txt create mode 100644 roles/host/docker/tasks/main.yml create mode 100644 roles/host/docker_swarm_leader/tasks/main.yml create mode 100644 roles/host/docker_swarm_worker/tasks/main.yml create mode 100644 roles/host/network_wg0/handlers/main.yml create mode 100644 roles/host/network_wg0/tasks/main.yml create mode 100644 roles/host/network_wg0/templates/99-wg0.netdev rename {playbooks => roles/host/network_wg0}/templates/99-wg0.network (100%) create mode 100644 roles/host/system_tools/tasks/main.yml create mode 100644 roles/setup/dns_foundation/defaults/main.yml create mode 100644 roles/setup/dns_foundation/tasks/main.yml create mode 100644 roles/setup/dns_foundation/vars/main.yml create mode 100644 roles/setup/hosts_digitalocean/README.md create mode 100644 roles/setup/hosts_digitalocean/defaults/main.yml create mode 100644 roles/setup/hosts_digitalocean/tasks/main.yml create mode 100644 
roles/setup/hosts_digitalocean/vars/main.yml create mode 100644 roles/stack/deploy_configs/defaults/main.yml create mode 100644 roles/stack/deploy_configs/tasks/main.yml create mode 100644 roles/stack/deploy_dns/tasks/main.yml create mode 100644 roles/stack/deploy_dns/vars/main.yml create mode 100644 roles/stack/deploy_network_overlay/tasks/main.yml create mode 100644 roles/stack/deploy_stack/defaults/main.yml create mode 100644 roles/stack/deploy_stack/handlers/main.yml create mode 100644 roles/stack/deploy_stack/tasks/main.yml create mode 100644 roles/stack/deploy_volume_s3/defaults/main.yml create mode 100644 roles/stack/deploy_volume_s3/tasks/main.yml create mode 100644 roles/stack/deploy_volume_s3/templates/rclone.conf create mode 100644 roles/stack/deploy_volume_s3/templates/rclone.service delete mode 100644 stacks/.gitignore create mode 100644 stacks/mesh/configs/mesh__site-support__service.toml delete mode 100644 stacks/mesh/configs/mesh__stack_chat.toml delete mode 100644 stacks/mesh/configs/mesh__stack_git.toml rename stacks/{mesh/configs/mesh__stack_site-support.toml => site-support/configs_mesh/mesh__site-support__service.toml} (90%) diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 8a732ea..0000000 --- a/.dockerignore +++ /dev/null @@ -1,35 +0,0 @@ - -# You may want to customise this file depending on your Operating System -# and the editor that you use. -# -# We recommend that you use a Global Gitignore for files that are not related -# to the project. 
(https://help.github.com/articles/ignoring-files/#create-a-global-gitignore) - -# OS -# -# Ref: https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -# Ref: https://github.com/github/gitignore/blob/master/Global/Windows.gitignore -# Ref: https://github.com/github/gitignore/blob/master/Global/Linux.gitignore -.DS_STORE -Thumbs.db - -# Editors -# -# Ref: https://github.com/github/gitignore/blob/master/Global -# Ref: https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore -# Ref: https://github.com/github/gitignore/blob/master/Global/VisualStudioCode.gitignore -.idea -.chrome -/*.log -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json - -# Python -**/__pycache__ -.venv - -# Local Developer Notes -dev diff --git a/README.md b/README.md index 59971b5..2c93120 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,63 @@ # Complete Infrastructure for DTU Python Support +This goal of this project is to describe and implement the complete infrastructure for DTUs Python Support group. **Very heavily WIP** -This project describes and implements the complete infrastructure for DTUs Python Support group. +## Project Goals +The ordered list of priorities are: +1. **Security/privacy**: It should address all major security concerns and take general good-practice steps to mitigate general issues. +2. **Reliability**: It should "just work", and keep "just work"ing until someone tells it otherwise. +3. **Developer usability**: It should be understandable and deployable with minimal human-to-human explanation. +4. **Resource/cost efficiency**: It should be surrounded by minimal effective infrastructure, and run on the cheapest hardware that supports the application use case. -The repository provides the following user-facing services: -- timesigned.com: Modern, multilingual guide to using Python at DTU. 
+That is to say: +- In a tradeoff between security and reliability, we will generally prefer security. This has a hard limit; note that **convenience is security**, and reliability is one of the finest conveniences that exist. +- In a tradeoff between reliability and dev usability, we will generally prefer reliability. This is a more subjective choice; deployment problems are categorically "hard", and "reliable" can very quickly come to mean "unusable to most". +- And so on... + +## Deployed Services +The following user-facing services are provided: +- pysupport.timesigned.com: Modern, multilingual guide to using Python at DTU. - SSG with [mdbook](https://rust-lang.github.io/mdBook/) w/plugins. - - chat.timesigned.com: Modern asynchronous communication and support channel for everybody using Python at DTU. - Instance of [Zulip](https://zulip.com/). - -- git.timesigned.com: Lightweight collaborative development for teams +- git.timesigned.com: Lightweight collaborative development and project management infrastructure for development teams. - Instance of [Forgejo](https://forgejo.org/), itself a soft-fork of [Gitea](https://about.gitea.com/) - - auth.timesigned.com: Identity Provider allowing seamless, secure access to key services with their DTU Account. - Instance of [Authentik](https://goauthentik.io/). - -- uptime.timesigned.com: Black-box monitoring with notifications +- uptime.timesigned.com: Black-box monitoring with operational notifications. - Instance of [Authentik](https://goauthentik.io/). +## Architecture +To achieve our goals, we choose the following basic bricks to play with: +- `docker swarm`: A (flawed, but principled) orchestrator with batteries included. +- `wireguard`: Encrypted L3 overlay network with no overhead. The perfect companion to any orchestrator. +- `ansible`: Expresses desired infrastructure state as YML. Better treated as pseudo-scripts that are guaranteed (\*) safe to re-run. 
+ +In practice, here are some of the key considerations in the architecture: +- **Prefer configs/secrets**: We always prefer mounted secrets/configs, which are not subject to persistence headaches, are protected by Raft consensus, and are immune to runtime modifications. + - **Our Approach**: We vehemently disallow secrets in the stack environment; when this is incompatible with the application, we use an entrypoint script to inject the environment variable from the docker secret file when calling the app. + +- **No `docker.sock`**: Access (even read-only) to `docker.sock` implicitly grants the container in question root access to the host. + - **Our Approach**: Use of `docker.sock` is reserved for pseudo-`cronjob` replacements; that is to say, deterministic, simple, easily vettable processes that are critical for host security. + +- **Rootless Container Internals**: The docker socket itself must be rootful in Swarm. This is a calculated risk, for which immense ease of use (**convenience is security!!**) and container-level security (specifically, managing when a container actually does get access to something sensitive) can be bought as managed `iptables` (especially effective over `wg0`), simple `CAP_DROP`, `cgroup` definitions, etc. . With a certain discipline, one gets a lot in return. + - **Our Approach**: We build infrastructure around containerized deployments (to manage ex. ownership and permissions) to ensure that unique UID:GIDs can run processes within containers without overlap. We actively prefer services that allow doing this, and are willing to resort to ex. entrypoint hacking to make rootless operation possible. We also take care to go beyond default Docker security CAP policies, aspiring to always run `CAP_DROP: ALL` by default, and then either manually `CAP_ADD` back or configuring the container process to not need the capability. 
+ +- **Encrypted `overlay`**: Docker `overlay` networks are principally not more secure than the network they're built in: Prone to Active/Passive MITM, MAC/IP spoofs, ARP cache poisoning, and so on. + - **Our Approach**: We build an encrypted L3 network with minimal overhead, using the `wireguard` kernel module via `systemd-networkd`. This enforces that Swarm communications happen over the `wg0` interface, without having to maintain a pile of scripts outside the main system. This eliminates MITM risk, and ensures that when `overlay` networks defining peers by their IP can trust that IP address. + - **NOTE on Key Generation**: We pre-generate all keys into our secret store (`password-store`), *including pre-shared keys*. This is extremely secure, but it's also a... Heavy way to do it (a PK problem). $100$ nodes would require generating and distributing $10100$ keys. We will never have more than 5 nodes, though. + +- **Reproducible Deployment**: Swarm deployments rely on a lot of external stuff: Availability of hosts, correct DNS records, shared attachable `overlay` networks with static IPs and hostnames for connected containers, volumes backed in various ways, configs/secrets with possible rotation, and so on. + - **Our Approach**: We aspire to encode the requisitioning of all required resources into the **single-source-of-truth deployment path**. In practice, this takes the form of an Ansible project; one tied especially closely to the contents of `docker-compose.yml` stack files. + +### Why not `x`? +- `k8s`/`k3s`/...: Unfortunately, the heaviness and complexity on a small team makes it break all of the four concerns. One can use cloud provider infrastructure, but then privacy (and cost!) becomes a risk. +- HashiCorp `x`: Terraform, Nomad, Vault, etc. are no longer free (as in freedom) software, and even if they still were, generally imply buy-in to the whole ecosystem. + # References +To dig deeper and/or develop this infrastructure. 
## Wireguard / systemd-networkd - `systemd-networkd` Network: @@ -53,8 +89,12 @@ The repository provides the following user-facing services: - S3 Backend: - Crypt Meta-Backend: - ## Swarm Deployment - The Funky Penguin: - Traefik Certificate Auto-Renewal: - Traefik Service: + +## Docker Networking +- Friends, Scopes Matter: + - `overlay` networks **require** `scope=global` when used the way we use it. + - Note, don't run other containers on hosts that you don't want able to connect to these overlay networks. diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..911870f --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] +roles_path = ./roles +host_key_checking = False diff --git a/inventory.yml b/inventory.yml index 828da5d..3bee8b7 100644 --- a/inventory.yml +++ b/inventory.yml @@ -1,65 +1,76 @@ +#################### +# - Global Variables +#################### +all: + vars: + passwordstore: "./dev/.password-store" + stacks_dir: "./stacks" + + project_name: "python-support" + dns_root: "timesigned.com" + email_abuse: "s174509@dtu.dk" + #################### # - Hosts - by Purpose #################### -service: +purpose_service: hosts: - raspberry.node.timesigned.com: - vars: - ansible_user: root + raspberry.node: -storage: +purpose_storage: hosts: - blueberry.node.timesigned.com: - vars: - ansible_user: root + blueberry.node: #################### # - Hosts - by Swarm Role #################### -leader: - ## ONLY ==1 Host can be Leader +swarm_leader: + ## ONLY ==1 Host should be Leader hosts: - raspberry.node.timesigned.com: - vars: - ansible_user: root + raspberry.node: -manager: +swarm_managers: hosts: - raspberry.node.timesigned.com: - vars: - ansible_user: root + raspberry.node: -worker: +swarm_workers: hosts: - blueberry.node.timesigned.com: - vars: - ansible_user: root + blueberry.node: -swarm: - hosts: - raspberry.node.timesigned.com: - blueberry.node.timesigned.com: +swarm_nodes: vars: - ansible_user: root - -#################### 
-# - Hosts - by L3 Network -#################### -wg0: + ansible_user: "root" + hosts: - raspberry.node.timesigned.com: + raspberry.node: + ansible_host: "raspberry.node.{{ dns_root }}" + wg0_ip: "10.9.8.1" + wg0_private_key: "{{ lookup( + 'community.general.passwordstore', + 'networks/wg0/raspberry.node/private_key' + ) }}" + wg0_public_key: "{{ lookup( + 'community.general.passwordstore', + 'networks/wg0/raspberry.node/public_key' + ) }}" + wg0_psk_blueberry.node: "{{ lookup( + 'community.general.passwordstore', + 'networks/wg0/raspberry.node/psk_blueberry.node' + ) }}" + blueberry.node: + ansible_host: "blueberry.node.{{ dns_root }}" - wg_private_key: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/wg/raspberry_private_key') }}" - wg_public_key: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/wg/raspberry_public_key') }}" - - wg_psk_blueberry.node.timesigned.com: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/wg/psk_raspberry-blueberry') }}" - blueberry.node.timesigned.com: wg0_ip: "10.9.8.2" - - wg_private_key: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/wg/blueberry_private_key') }}" - wg_public_key: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/wg/blueberry_public_key') }}" - - wg_psk_raspberry.node.timesigned.com: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/wg/psk_raspberry-blueberry') }}" - vars: - ansible_user: root + wg0_private_key: "{{ lookup( + 'community.general.passwordstore', + 'networks/wg0/blueberry.node/private_key' + ) }}" + wg0_public_key: "{{ lookup( + 'community.general.passwordstore', + 'networks/wg0/blueberry.node/public_key' + ) }}" + wg0_psk_raspberry.node: "{{ lookup( + 'community.general.passwordstore', + 'networks/wg0/raspberry.node/psk_blueberry.node' + ) }}" diff --git a/playbook.yml b/playbook.yml new file mode 100644 index 0000000..d7286c6 --- /dev/null +++ b/playbook.yml @@ -0,0 +1,70 
@@ +#################### +# - Setup +#################### +- name: "Allocate Hosts w/DNS" + tags: + - "stage_setup" + hosts: "localhost" + vars: + do_project: "{{ project_name }}" + do_project_purpose: "Infrastructure for the Python Support Team." + + roles: + - role: "setup/hosts_digitalocean" + vars: + hosts_do: "{{ groups['purpose_storage'] }}" + ## SET: nodes_to_ipv4s_public@localhost + ## SET: nodes_to_ipv4s_private@localhost + + - role: "setup/hosts_digitalocean" + vars: + hosts_do: "{{ groups['purpose_service'] }}" + ## SET: nodes_to_ipv4s_public@localhost + ## SET: nodes_to_ipv4s_private@localhost + + - role: "setup/dns_foundation" + vars: + ipv4_root: "{{ nodes_to_ipv4s_public['raspberry.node'] }}" + +#################### +# - Setup Hosts +#################### +- name: "Configure Hosts" + hosts: "swarm_nodes" + tags: + - "stage_host" + roles: + - role: "host/system_tools" + + - role: "host/network_wg0" + vars: + hosts_wg0: "{{ groups['swarm_nodes'] }}" + + - role: "host/docker" + +- name: "Configure Docker Swarm Leader" + hosts: "swarm_leader" + tags: + - "stage_host" + roles: + - role: "host/docker_swarm_leader" + ## SET: swarm_manager_token@swarm_leader + ## SET: swarm_worker_token@swarm_leader + +- name: "Configure Docker Swarm Workers" + hosts: "swarm_workers" + tags: + - "stage_host" + roles: + - role: "host/docker_swarm_worker" + vars: + host_swarm_leader: "{{ groups['swarm_leader'][0] }}" + + + +#################### +# - Deploy Stacks +#################### +- import_playbook: "./stacks/mesh/playbook.yml" +- import_playbook: "./stacks/site-support/playbook.yml" +- import_playbook: "./stacks/cleanup/playbook.yml" diff --git a/playbooks/playbook.hosts.yml b/playbooks/playbook.hosts.yml deleted file mode 100644 index d281bcd..0000000 --- a/playbooks/playbook.hosts.yml +++ /dev/null @@ -1,136 +0,0 @@ -- hosts: localhost - vars: - dns_root: "timesigned.com" - node_primary: "raspberry.node.timesigned.com" - - digitalocean_droplet_token: "{{ 
lookup('community.general.passwordstore', 'work/dtu/python-support/digitalocean-droplet-token') }}" - - cloudflare_email: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/cloudflare-email') }}" - cloudflare_dns_token: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/cloudflare-dns-token') }}" - - droplet_service_image: "debian-12-x64" - ## curl -X GET --silent "https://api.digitalocean.com/v2/images?per_page=999" -H "Authorization: Bearer $(pass work/dtu/python-support/digitalocean-droplet-token)" | jq | less - droplet_service_size: "s-1vcpu-1gb" - droplet_service_region: "fra1" - ## curl -X GET --silent "https://api.digitalocean.com/v2/sizes?per_page=999" -H "Authorization: Bearer $(pass work/dtu/python-support/digitalocean-droplet-token)" | jq | less - - droplet_storage_image: "debian-12-x64" - droplet_storage_size: "s-1vcpu-1gb" - droplet_storage_region: "fra1" - - tasks: - #################### - # - Prepare SSH Information - #################### - - name: "Get SSH Public Key" - shell: "ssh-add -L" - register: "ssh_key_pub_cmdout" - - - name: "Add SSH Public Key to DigitalOcean account" - digital_ocean_sshkey: - name: "key" - oauth_token: "{{ digitalocean_droplet_token }}" - ssh_pub_key: "{{ ssh_key_pub_cmdout.stdout }}" - state: "present" - register: "sshkey_result" - - #################### - # - Create Digitalocean Nodes - #################### - - name: "Create Storage Droplet" - digital_ocean_droplet: - name: "{{ item }}" - oauth_token: "{{ digitalocean_droplet_token }}" - ssh_keys: ["{{ sshkey_result.data.ssh_key.id }}"] - - image: "{{ droplet_storage_image }}" - size: "{{ droplet_storage_size }}" - region: "{{ droplet_storage_region }}" - - wait_timeout: 600 - unique_name: "yes" - - state: present - with_inventory_hostnames: - - storage - register: droplet_storage_result - - - name: "Create Service Droplet" - digital_ocean_droplet: - name: "{{ item }}" - oauth_token: "{{ digitalocean_droplet_token }}" - ssh_keys: 
["{{ sshkey_result.data.ssh_key.id }}"] - - image: "{{ droplet_service_image }}" - size: "{{ droplet_service_size }}" - region: "{{ droplet_service_region }}" - - wait_timeout: 600 - unique_name: "yes" - - state: present - with_inventory_hostnames: - - service - register: droplet_service_result - - #################### - # - Set DNS A Records => Hosts - #################### - - name: "Set Storage DNS A => *.node.{{ dns_root }}" - cloudflare_dns: - api_token: "{{ cloudflare_dns_token }}" - - zone: "{{ dns_root }}" - type: "A" - - record: "{{ item.data.droplet.name }}" - value: "{{ item.data.ip_address }}" - with_items: "{{ droplet_storage_result.results }}" - - - name: "Set Service DNS A => *.node.{{ dns_root }}" - cloudflare_dns: - api_token: "{{ cloudflare_dns_token }}" - - zone: "{{ dns_root }}" - type: "A" - - record: "{{ item.data.droplet.name }}" - value: "{{ item.data.ip_address }}" - with_items: "{{ droplet_service_result.results }}" - - #################### - # - Set DNS CNAME Record => @ - #################### - - name: "Set DNS CNAME => Primary Node" - cloudflare_dns: - api_token: "{{ cloudflare_dns_token }}" - - zone: "{{ dns_root }}" - type: "CNAME" - - record: "@" - value: "{{ node_primary }}" - ## Cloudflare allows CNAME on @ via CNAME-flattening - - #################### - # - Set DNS CNAME Records => Stacks - #################### - - name: "Set DNS CNAME => Stack: auth" - cloudflare_dns: - api_token: "{{ cloudflare_dns_token }}" - - zone: "{{ dns_root }}" - type: "CNAME" - - record: "auth" - value: "@" - - - name: "Set DNS CNAME => Stack: site-support" - cloudflare_dns: - api_token: "{{ cloudflare_dns_token }}" - - zone: "{{ dns_root }}" - type: "CNAME" - - record: "pysupport" - value: "@" diff --git a/playbooks/playbook.swarm.yml b/playbooks/playbook.swarm.yml deleted file mode 100644 index 3106dd2..0000000 --- a/playbooks/playbook.swarm.yml +++ /dev/null @@ -1,132 +0,0 @@ -- hosts: swarm - become: "true" - tasks: - #################### - # - Tuning 
- Traefik - # -- Traefik serving QUIC can be bottlenecked by a too-low UDP buffer. - # -- This increases both send & receive from ~200KB to 2.5MB. - #################### - - name: "Set net.core.rmem_max = 2500000" - sysctl: - state: "present" - name: "net.core.rmem_max" - value: "2500000" - reload: "yes" - - - name: "Set net.core.wmem_max = 2500000" - sysctl: - state: "present" - name: "net.core.rmem_max" - value: "2500000" - reload: "yes" - - #################### - # - Docker - Install - #################### - - name: "Download Docker Apt Key" - ansible.builtin.get_url: - url: "https://download.docker.com/linux/debian/gpg" - dest: "/etc/apt/trusted.gpg.d/docker.asc" - checksum: "sha256:1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570" - owner: "root" - group: "root" - mode: "644" - - - name: "Add Docker Apt Repository" - apt_repository: - state: "present" - repo: "deb https://download.docker.com/linux/debian bullseye stable" - filename: "docker" - - - name: "Install Docker CE" - apt: - state: "present" - name: "docker-ce" - - - name: "Install python3-docker" - apt: - state: "present" - name: "python3-docker" - - #################### - # - Docker Plugin - rclone - #################### - - name: "Install fuse" - apt: - state: "present" - name: "fuse" - - - name: "Create rclone Config Path" - ansible.builtin.file: - path: "/var/lib/docker-plugins/rclone/config" - state: directory - mode: "0750" - - - name: "Create rclone Cache Path" - ansible.builtin.file: - path: "/var/lib/docker-plugins/rclone/cache" - state: directory - mode: "0750" - -# - name: "Disable the rclone Docker Plugin" -# community.docker.docker_plugin: -# state: "disable" -# alias: "rclone" -# plugin_name: "rclone/docker-volume-rclone:amd64" - - - name: "Install rclone Docker Plugin" - community.docker.docker_plugin: - state: "present" - alias: "rclone" - plugin_name: "rclone/docker-volume-rclone:amd64" - plugin_options: - args: "-v --allow-other" - - - name: "Enable the rclone Docker 
Plugin" - community.docker.docker_plugin: - state: "enable" - alias: "rclone" - plugin_name: "rclone/docker-volume-rclone:amd64" - plugin_options: - args: "-v --allow-other" - -#################### -# - Docker - Swarm Init -#################### -- hosts: leader - become: "true" - tasks: - - name: "Initialize Docker Swarm Leader" - community.docker.docker_swarm: - state: "present" - advertise_addr: "{{ wg0_ip }}" - listen_addr: "{{ wg0_ip }}:2377" - - - name: "Collect Swarm Info" - community.docker.docker_swarm_info: - register: swarm_info - - - name: "Retrieve Join Tokens" - set_fact: - swarm_manager_token: "{{ swarm_info.swarm_facts['JoinTokens']['Manager'] }}" - swarm_worker_token: "{{ swarm_info.swarm_facts['JoinTokens']['Worker'] }}" - - - name: "Install jsondiff & pyyaml (stack-deploy deps)" - apt: - state: "present" - name: - - "python3-jsondiff" - - "python3-yaml" - -# SKIP Manager -# - Currently, there is only one manager == leader. So there's no point. - -- hosts: worker - become: "true" - tasks: - - name: "Initialize Docker Swarm Workers" - community.docker.docker_swarm: - state: "join" - advertise_addr: "{{ wg0_ip }}" - join_token: "{{ hostvars[groups['leader'][0]]['swarm_worker_token'] }}" - remote_addrs: [ "{{ hostvars[groups['leader'][0]]['wg0_ip'] }}:2377" ] diff --git a/playbooks/playbook.systems.yml b/playbooks/playbook.systems.yml deleted file mode 100644 index 9522b30..0000000 --- a/playbooks/playbook.systems.yml +++ /dev/null @@ -1,10 +0,0 @@ -- hosts: swarm - become: "true" - tasks: - #################### - # - Tuning - Dev - #################### - - name: "Install Terminfo for Kitty" - ansible.builtin.apt: - state: "present" - name: "kitty-terminfo" diff --git a/playbooks/playbook.wg0.yml b/playbooks/playbook.wg0.yml deleted file mode 100644 index a605e5e..0000000 --- a/playbooks/playbook.wg0.yml +++ /dev/null @@ -1,43 +0,0 @@ -- hosts: wg0 - become: "true" - tasks: - #################### - # - Wireguard - #################### - - name: 
"Install Wireguard Tools" - ansible.builtin.apt: - state: "present" - name: "wireguard" - - - name: "systemd-networkd: Install wg0 Device" - template: - src: "./templates/99-wg0.netdev" - dest: "/etc/systemd/network/99-wg0.netdev" - owner: "root" - group: "systemd-network" - mode: "0640" - - - name: "systemd-networkd: Install wg0 Network" - template: - src: "./templates/99-wg0.network" - dest: "/etc/systemd/network/99-wg0.network" - owner: "root" - group: "systemd-network" - mode: "0640" - - - name: "Restart systemd-networkd" - systemd: - name: "systemd-networkd.service" - state: "restarted" - - #################### - # - Wireguard - Enable Packet Forwarding - #################### - - name: "Set net.ipv4.ip_forward = 1" - sysctl: - state: "present" - name: "net.ipv4.ip_forward" - value: "1" - reload: "yes" - - diff --git a/playbooks/templates/99-wg0.netdev b/playbooks/templates/99-wg0.netdev deleted file mode 100644 index ca54ff7..0000000 --- a/playbooks/templates/99-wg0.netdev +++ /dev/null @@ -1,19 +0,0 @@ -[NetDev] -Name=wg0 -Kind=wireguard -Description=WireGuard tunnel wg0 - -[WireGuard] -ListenPort=51871 -PrivateKey={{ wg_private_key }} - -{% for item in groups['wg0'] %} -{% if item != inventory_hostname %} -[WireGuardPeer] -PublicKey={{ hostvars[item]['wg_public_key'] }} -PresharedKey={{ hostvars[item]['wg_psk_' ~ inventory_hostname] }} -AllowedIPs={{ hostvars[item]['wg0_ip'] }}/32 -Endpoint={{ item }}:51871 - -{% endif %} -{% endfor %} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..1caa77a --- /dev/null +++ b/requirements.txt @@ -0,0 +1,19 @@ +ansible==8.3.0 +ansible-core==2.15.3 +boto3==1.28.30 +botocore==1.31.30 +cffi==1.15.1 +cryptography==41.0.3 +dnspython==2.4.2 +importlib-resources==5.0.7 +Jinja2==3.1.2 +jmespath==1.0.1 +MarkupSafe==2.1.3 +packaging==23.1 +pycparser==2.21 +python-dateutil==2.8.2 +PyYAML==6.0.1 +resolvelib==1.0.1 +s3transfer==0.6.2 +six==1.16.0 +urllib3==1.26.16 diff --git 
a/roles/host/docker/tasks/main.yml b/roles/host/docker/tasks/main.yml new file mode 100644 index 0000000..dbc33d8 --- /dev/null +++ b/roles/host/docker/tasks/main.yml @@ -0,0 +1,27 @@ +#################### +# - Docker - Install +#################### +- name: "Download Docker Apt Key" + ansible.builtin.get_url: + url: "https://download.docker.com/linux/debian/gpg" + dest: "/etc/apt/trusted.gpg.d/docker.asc" + checksum: "sha256:1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570" + owner: "root" + group: "root" + mode: "644" + +- name: "Add Docker Apt Repository" + apt_repository: + state: "present" + repo: "deb https://download.docker.com/linux/debian bookworm stable" + filename: "docker" + +- name: "Install Docker CE" + apt: + state: "present" + name: "docker-ce" + +- name: "Install python3-docker" + apt: + state: "present" + name: "python3-docker" diff --git a/roles/host/docker_swarm_leader/tasks/main.yml b/roles/host/docker_swarm_leader/tasks/main.yml new file mode 100644 index 0000000..45bf0f8 --- /dev/null +++ b/roles/host/docker_swarm_leader/tasks/main.yml @@ -0,0 +1,32 @@ +#################### +# - Check Variables +#################### +- name: "[Host] Check that mandatory variables are defined" + assert: + that: + - "wg0_ip is defined" + +#################### +# - Check Variables +#################### +- name: "Initialize Docker Swarm Leader" + community.docker.docker_swarm: + state: "present" + advertise_addr: "{{ wg0_ip }}" + listen_addr: "{{ wg0_ip }}:2377" + +- name: "Collect Swarm Info" + community.docker.docker_swarm_info: + register: swarm_info + +- name: "Retrieve Join Tokens" + set_fact: + swarm_manager_token: "{{ swarm_info.swarm_facts['JoinTokens']['Manager'] }}" + swarm_worker_token: "{{ swarm_info.swarm_facts['JoinTokens']['Worker'] }}" + +- name: "Install jsondiff & pyyaml (stack-deploy deps)" + apt: + state: "present" + name: + - "python3-jsondiff" + - "python3-yaml" diff --git a/roles/host/docker_swarm_worker/tasks/main.yml 
b/roles/host/docker_swarm_worker/tasks/main.yml new file mode 100644 index 0000000..5c7b3e3 --- /dev/null +++ b/roles/host/docker_swarm_worker/tasks/main.yml @@ -0,0 +1,23 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check that mandatory variables are defined" + assert: + that: + - "host_swarm_leader is defined" + +- name: "[Host][host_swarm_leader] Check that mandatory variables are defined" + assert: + that: + - "'swarm_worker_token' in hostvars[host_swarm_leader]" + - "'wg0_ip' in hostvars[host_swarm_leader]" + +#################### +# - Initialize Workers +#################### +- name: "Initialize Docker Swarm Workers" + community.docker.docker_swarm: + state: "join" + advertise_addr: "{{ wg0_ip }}" + join_token: "{{ hostvars[host_swarm_leader]['swarm_worker_token'] }}" + remote_addrs: [ "{{ hostvars[host_swarm_leader]['wg0_ip'] }}:2377" ] diff --git a/roles/host/network_wg0/handlers/main.yml b/roles/host/network_wg0/handlers/main.yml new file mode 100644 index 0000000..55ff1f3 --- /dev/null +++ b/roles/host/network_wg0/handlers/main.yml @@ -0,0 +1,5 @@ +- name: "Restart systemd-networkd" + systemd: + name: "systemd-networkd.service" + state: "restarted" + listen: "restart systemd-networkd" diff --git a/roles/host/network_wg0/tasks/main.yml b/roles/host/network_wg0/tasks/main.yml new file mode 100644 index 0000000..186e8a6 --- /dev/null +++ b/roles/host/network_wg0/tasks/main.yml @@ -0,0 +1,67 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check Variables" + assert: + that: + - "hosts_wg0 is defined" + +- name: "[Host][localhost] Check Variables" + assert: + that: + - "hostvars['localhost'].nodes_to_ipv4s_private is defined" + +- name: "[Host] Check Variables" + assert: + that: + - "wg0_private_key is defined" + - "wg0_public_key is defined" + - "wg0_ip is defined" + with_items: "{{ hosts_wg0 }}" + +- name: "[Special][Inter-Host PSKs] Check Variables" + assert: + that: + - "'wg0_psk_' 
~ item in vars" + with_items: "{{ hosts_wg0 }}" + when: "item != inventory_hostname" + +#################### +# - Wireguard +#################### +- name: "Install Wireguard Tools" + ansible.builtin.apt: + state: "present" + name: "wireguard" + +- name: "systemd-networkd: Install wg0 Device" + template: + src: "{{ role_path }}/templates/99-wg0.netdev" + dest: "/etc/systemd/network/99-wg0.netdev" + owner: "root" + group: "systemd-network" + mode: "0640" + notify: "restart systemd-networkd" + +- name: "systemd-networkd: Install wg0 Network" + template: + src: "{{ role_path }}/templates/99-wg0.network" + dest: "/etc/systemd/network/99-wg0.network" + owner: "root" + group: "systemd-network" + mode: "0640" + notify: "restart systemd-networkd" + +#################### +# - Wireguard - Enable Packet Forwarding +#################### +- name: "Set net.ipv4.ip_forward = 1" + sysctl: + state: "present" + name: "net.ipv4.ip_forward" + value: "1" + reload: "yes" + notify: "restart systemd-networkd" + +- name: "Run Notified Handlers" + meta: "flush_handlers" diff --git a/roles/host/network_wg0/templates/99-wg0.netdev b/roles/host/network_wg0/templates/99-wg0.netdev new file mode 100644 index 0000000..8ddacfb --- /dev/null +++ b/roles/host/network_wg0/templates/99-wg0.netdev @@ -0,0 +1,19 @@ +[NetDev] +Name=wg0 +Kind=wireguard +Description=WireGuard tunnel wg0 + +[WireGuard] +ListenPort=51871 +PrivateKey={{ wg0_private_key }} + +{% for item in hosts_wg0 %} +{% if item != inventory_hostname %} +[WireGuardPeer] +PublicKey={{ hostvars[item].wg0_public_key }} +PresharedKey={{ hostvars[item]['wg0_psk_' ~ inventory_hostname] }} +AllowedIPs={{ hostvars[item].wg0_ip }}/32 +Endpoint={{ hostvars['localhost'].nodes_to_ipv4s_private[item] }}:51871 + +{% endif %} +{% endfor %} diff --git a/playbooks/templates/99-wg0.network b/roles/host/network_wg0/templates/99-wg0.network similarity index 100% rename from playbooks/templates/99-wg0.network rename to
roles/host/network_wg0/templates/99-wg0.network diff --git a/roles/host/system_tools/tasks/main.yml b/roles/host/system_tools/tasks/main.yml new file mode 100644 index 0000000..739f45b --- /dev/null +++ b/roles/host/system_tools/tasks/main.yml @@ -0,0 +1,4 @@ +- name: "Install Terminfo for Kitty" + ansible.builtin.apt: + state: "present" + name: "kitty-terminfo" diff --git a/roles/setup/dns_foundation/defaults/main.yml b/roles/setup/dns_foundation/defaults/main.yml new file mode 100644 index 0000000..87fcbf6 --- /dev/null +++ b/roles/setup/dns_foundation/defaults/main.yml @@ -0,0 +1 @@ +dns_root: "timesigned.com" diff --git a/roles/setup/dns_foundation/tasks/main.yml b/roles/setup/dns_foundation/tasks/main.yml new file mode 100644 index 0000000..0a08a9d --- /dev/null +++ b/roles/setup/dns_foundation/tasks/main.yml @@ -0,0 +1,55 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check Variables" + assert: + that: + - "nodes_to_ipv4s_public is defined" + - "ipv4_root is defined" + +#################### +# - Set DNS A Records => Hosts +#################### +- name: "Set Node DNS A => *.node.{{ dns_root }}" + cloudflare_dns: + api_token: "{{ cloudflare_dns_token }}" + + zone: "{{ dns_root }}" + type: "A" + solo: true + + record: "{{ item.key }}" + value: "{{ item.value }}" + with_dict: "{{ nodes_to_ipv4s_public }}" + +#################### +# - Set DNS A Record => @ +#################### +- name: "Set DNS A => Primary Node" + cloudflare_dns: + api_token: "{{ cloudflare_dns_token }}" + + zone: "{{ dns_root }}" + type: "A" + solo: true + + record: "@" + value: "{{ ipv4_root }}" + +- name: "Wait for Node DNS Propagation" + debug: + msg: "Waiting..." + until: "lookup( + 'community.general.dig', + item.key ~ '.' ~ dns_root + ) == item.value" + retries: 30 + delay: 10 + with_dict: "{{ nodes_to_ipv4s_public }}" + +- name: "Wait for Primary DNS Propagation" + debug: + msg: "Waiting..." 
+ until: "lookup('community.general.dig', dns_root) == ipv4_root" + retries: 30 + delay: 10 diff --git a/roles/setup/dns_foundation/vars/main.yml b/roles/setup/dns_foundation/vars/main.yml new file mode 100644 index 0000000..5b68a03 --- /dev/null +++ b/roles/setup/dns_foundation/vars/main.yml @@ -0,0 +1,4 @@ +cloudflare_dns_token: "{{ lookup( + 'community.general.passwordstore', + 'cloudflare/dns-token' +) }}" diff --git a/roles/setup/hosts_digitalocean/README.md b/roles/setup/hosts_digitalocean/README.md new file mode 100644 index 0000000..14e4da3 --- /dev/null +++ b/roles/setup/hosts_digitalocean/README.md @@ -0,0 +1,2 @@ +## Sets +- `nodes_to_ipv4s |=` on `localhost`: DigitalOcean Droplet IPs, indexed by inventory hostname. diff --git a/roles/setup/hosts_digitalocean/defaults/main.yml b/roles/setup/hosts_digitalocean/defaults/main.yml new file mode 100644 index 0000000..b55c891 --- /dev/null +++ b/roles/setup/hosts_digitalocean/defaults/main.yml @@ -0,0 +1,11 @@ +droplet_image: "debian-12-x64" +droplet_size: "s-1vcpu-1gb" +droplet_region: "fra1" + +ssh_key_pub: "{{ lookup('ansible.builtin.pipe', 'ssh-add -L') }}" + +## Get Image + ## curl -X GET --silent "https://api.digitalocean.com/v2/images?per_page=999" -H "Authorization: Bearer " | jq | less + +## Get Sizes + ## curl -X GET --silent "https://api.digitalocean.com/v2/sizes?per_page=999" -H "Authorization: Bearer " | jq | less diff --git a/roles/setup/hosts_digitalocean/tasks/main.yml b/roles/setup/hosts_digitalocean/tasks/main.yml new file mode 100644 index 0000000..69a246e --- /dev/null +++ b/roles/setup/hosts_digitalocean/tasks/main.yml @@ -0,0 +1,81 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check Variables" + assert: + that: + - "hosts_do is defined" + - "do_project is defined" + - "do_project_purpose is defined" + +#################### +# - Prepare SSH Information +#################### +- name: "Add SSH Public Key to DO Account" + digital_ocean_sshkey: + state: 
"present" + + name: "{{ ssh_key_pub.split(' ')[-1] }}" + oauth_token: "{{ digitalocean_droplet_token }}" + ssh_pub_key: "{{ ssh_key_pub }}" + register: "do_sshkey_result" + +#################### +# - Create Digitalocean Project +#################### +- name: "Create DO Project: {{ do_project }}" + run_once: true + community.digitalocean.digital_ocean_project: + state: "present" + + name: "{{ do_project }}" + oauth_token: "{{ digitalocean_droplet_token }}" + purpose: "{{ do_project_purpose }}" + +#################### +# - Create Digitalocean Nodes +#################### +- name: "Create Droplets" + digital_ocean_droplet: + state: "present" + + name: "{{ item }}" + oauth_token: "{{ digitalocean_droplet_token }}" + ssh_keys: ["{{ do_sshkey_result.data.ssh_key.id }}"] + + image: "{{ droplet_image }}" + size: "{{ droplet_size }}" + region: "{{ droplet_region }}" + + project: "{{ do_project }}" + wait_timeout: 600 + unique_name: "yes" + + with_items: "{{ hosts_do }}" + register: "droplet_result" + +- name: "Register Droplet IPs" + set_fact: + nodes_to_ipv4s_public: "{{ + nodes_to_ipv4s_public + | default({}) + | combine({ + item.data.droplet.name: ( + item.data.droplet.networks.v4 + | selectattr('type', 'eq', 'public') + | first + ).ip_address, + }) + }}" + nodes_to_ipv4s_private: "{{ + nodes_to_ipv4s_private + | default({}) + | combine({ + item.data.droplet.name: ( + item.data.droplet.networks.v4 + | selectattr('type', 'eq', 'private') + | first + ).ip_address, + }) + }}" + with_items: "{{ droplet_result.results }}" diff --git a/roles/setup/hosts_digitalocean/vars/main.yml b/roles/setup/hosts_digitalocean/vars/main.yml new file mode 100644 index 0000000..74dc51c --- /dev/null +++ b/roles/setup/hosts_digitalocean/vars/main.yml @@ -0,0 +1,4 @@ +digitalocean_droplet_token: "{{ lookup( + 'community.general.passwordstore', + 'digitalocean/droplet-token', +) }}" diff --git a/roles/stack/deploy_configs/defaults/main.yml b/roles/stack/deploy_configs/defaults/main.yml new file 
mode 100644 index 0000000..8b7a8df --- /dev/null +++ b/roles/stack/deploy_configs/defaults/main.yml @@ -0,0 +1,3 @@ +stack_dir: "{{ playbook_dir }}" +stack_config_paths: "{{ lookup('fileglob', stack_dir ~ '/configs/*').split(',') }}" +stack_configs: "{{ stack_config_paths | map('basename') | list }}" diff --git a/roles/stack/deploy_configs/tasks/main.yml b/roles/stack/deploy_configs/tasks/main.yml new file mode 100644 index 0000000..891e5de --- /dev/null +++ b/roles/stack/deploy_configs/tasks/main.yml @@ -0,0 +1,34 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check that mandatory variables are defined" + assert: + that: + - "stack_name is defined" + +#################### +# - Deploy Configs +#################### +- name: "Stop Stack: {{ stack_name }}" + run_once: true + community.docker.docker_stack: + state: "absent" + + name: "{{ stack_name }}" + absent_retries: 15 + +- name: "Wait for Stack to Stop" + run_once: true + shell: "until [ -z $(docker stack ps {{ stack_name }} -q) ]; do sleep 1; done" + +- name: "Wait for Stack Networks to Stop" + run_once: true + shell: "until [ -z $(docker network ls --filter label=com.docker.stack.namespace={{ stack_name }} -q) ]; do sleep 1; done" + +- name: "Create Docker Configs" + community.docker.docker_config: + state: "present" + name: "{{ item }}" + data: "{{ lookup('template', stack_dir ~ '/configs/' ~ item) | b64encode }}" + data_is_b64: "true" + with_items: "{{ stack_configs }}" diff --git a/roles/stack/deploy_dns/tasks/main.yml b/roles/stack/deploy_dns/tasks/main.yml new file mode 100644 index 0000000..2d5a5dd --- /dev/null +++ b/roles/stack/deploy_dns/tasks/main.yml @@ -0,0 +1,28 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check that mandatory variables are defined" + assert: + that: + - "domain is defined" + - "domain_to is defined" + +#################### +# - Set DNS CNAME Record => @ +#################### +- name: "Set DNS CNAME {{ domain }} => {{ domain_to }}"
cloudflare_dns: + api_token: "{{ cloudflare_dns_token }}" + + zone: "{{ dns_root }}" + type: "CNAME" + + record: "{{ domain }}" + value: "{{ domain_to }}" + +- name: "Wait for DNS Propagation" + debug: + msg: "Waiting..." + until: "lookup('community.general.dig', domain) == lookup('community.general.dig', domain_to)" + retries: 30 + delay: 10 diff --git a/roles/stack/deploy_dns/vars/main.yml b/roles/stack/deploy_dns/vars/main.yml new file mode 100644 index 0000000..5b68a03 --- /dev/null +++ b/roles/stack/deploy_dns/vars/main.yml @@ -0,0 +1,4 @@ +cloudflare_dns_token: "{{ lookup( + 'community.general.passwordstore', + 'cloudflare/dns-token' +) }}" diff --git a/roles/stack/deploy_network_overlay/tasks/main.yml b/roles/stack/deploy_network_overlay/tasks/main.yml new file mode 100644 index 0000000..55beeff --- /dev/null +++ b/roles/stack/deploy_network_overlay/tasks/main.yml @@ -0,0 +1,29 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check that mandatory variables are defined" + assert: + that: + - "network_name is defined" + +#################### +# - Network Creation +#################### +- name: "Retrieve {{ network_name }} Info" + community.docker.docker_network_info: + name: "{{ network_name }}" + register: result + +- name: "Create {{ network_name }}" + run_once: true + community.docker.docker_network: + state: "present" + + name: "{{ network_name }}" + driver: "overlay" + scope: "swarm" + + attachable: true + appends: true + when: "not result.exists" + diff --git a/roles/stack/deploy_stack/defaults/main.yml b/roles/stack/deploy_stack/defaults/main.yml new file mode 100644 index 0000000..8c0ea8d --- /dev/null +++ b/roles/stack/deploy_stack/defaults/main.yml @@ -0,0 +1 @@ +stack_dir: "{{ playbook_dir }}" diff --git a/roles/stack/deploy_stack/handlers/main.yml b/roles/stack/deploy_stack/handlers/main.yml new file mode 100644 index 0000000..4a49adc --- /dev/null +++ b/roles/stack/deploy_stack/handlers/main.yml @@ -0,0 +1,5 @@
+- name: "Redeploy Stack " + systemd: + name: "systemd-networkd.service" + state: "restarted" + listen: "restart systemd-networkd" diff --git a/roles/stack/deploy_stack/tasks/main.yml b/roles/stack/deploy_stack/tasks/main.yml new file mode 100644 index 0000000..1c451a1 --- /dev/null +++ b/roles/stack/deploy_stack/tasks/main.yml @@ -0,0 +1,18 @@ +#################### +# - Check Variables +#################### +- name: "[Play] Check that mandatory variables are defined" + assert: + that: + - "stack_name is defined" + +#################### +# - Stack Deployment +#################### +- name: "Deploy Stack: {{ stack_name }}" + community.docker.docker_stack: + state: "present" + prune: "true" + name: "{{ stack_name }}" + compose: + - "{{ lookup('file', stack_dir ~ '/docker-compose.yml') | from_yaml }}" diff --git a/roles/stack/deploy_volume_s3/defaults/main.yml b/roles/stack/deploy_volume_s3/defaults/main.yml new file mode 100644 index 0000000..4c70ef4 --- /dev/null +++ b/roles/stack/deploy_volume_s3/defaults/main.yml @@ -0,0 +1,65 @@ +# S3 Master Credentials +cloudflare_account_id: "{{ lookup( + 'community.general.passwordstore', + 'cloudflare/account-id' +) }}" +s3_master_access_key_id: "{{ lookup( + 'community.general.passwordstore', + 'cloudflare/r2/s3_access_key_id' +) }}" +s3_master_secret_access_key: "{{ lookup( + 'community.general.passwordstore', + 'cloudflare/r2/s3_secret_access_key' +) }}" +s3_master_endpoint: "https://{{ cloudflare_account_id }}.r2.cloudflarestorage.com" + +# S3 Bucket Info +s3_bucket_name: "{{ volume_name | replace('_', '-') }}" +s3_access_key_id: "{{ lookup( + 'community.general.passwordstore', + 'volumes/' ~ volume_name ~ '/s3_access_key_id' +) }}" +s3_secret_access_key: "{{ lookup( + 'community.general.passwordstore', + 'volumes/' ~ volume_name ~ '/s3_secret_access_key' +) }}" +s3_endpoint: "{{ s3_master_endpoint }}/{{ s3_bucket_name }}" +s3_acl: "private" + +# Volume Dirs / Files +dir_volume_base: "/data/volumes/{{ volume_name }}" 
+dir_volume_cache: "{{ dir_volume_base }}/cache" +dir_volume_mount: "{{ dir_volume_base }}/data" +file_rclone_config: "{{ dir_volume_base }}/rclone.conf" + +# rclone Encryption Options +rclone_enckey_1: "{{ lookup( + 'community.general.passwordstore', + 'volumes/' ~ volume_name ~ '/rclone_enckey_1' +) }}" +rclone_enckey_2: "{{ lookup( + 'community.general.passwordstore', + 'volumes/' ~ volume_name ~ '/rclone_enckey_2' +) }}" + +# rclone Config/Permissions +perms_uid: "0" +perms_gid: "0" +perms_dir: "0777" +perms_files: "0666" +perms_umask: "2" +vfs_cache_mode: "full" + +rclone_mount_opts: "{{ + '--config ' ~ file_rclone_config + ~ ' --cache-dir ' ~ dir_volume_cache + ~ ' --default-permissions' + ~ ' --allow-other' + ~ ' --uid ' ~ perms_uid + ~ ' --gid ' ~ perms_gid + ~ ' --dir-perms ' ~ perms_dir + ~ ' --file-perms ' ~ perms_files + ~ ' --umask ' ~ perms_umask + ~ '' + ~ ' --vfs-cache-mode ' ~ vfs_cache_mode +}}" diff --git a/roles/stack/deploy_volume_s3/tasks/main.yml b/roles/stack/deploy_volume_s3/tasks/main.yml new file mode 100644 index 0000000..13987d3 --- /dev/null +++ b/roles/stack/deploy_volume_s3/tasks/main.yml @@ -0,0 +1,64 @@ +# Install rclone +- name: "Install rclone & fuse" + run_once: true + apt: + state: "present" + name: + - "rclone" + - "fuse" + +# Create S3 Bucket +- name: "Create S3 Bucket" + local_action: + module: "amazon.aws.s3_bucket" + state: "present" + + name: "{{ s3_bucket_name }}" + + access_key: "{{ s3_master_access_key_id }}" + secret_key: "{{ s3_master_secret_access_key }}" + endpoint_url: "{{ s3_master_endpoint }}" + #s3_url: "{{ s3_master_endpoint }}" + +# Create Volume Directories +- name: "Create S3-Backed Volume Base Directory" + file: + state: "directory" + path: "{{ dir_volume_base }}" + mode: "0700" + +- name: "Create S3-Backed Volume Cache Directory" + file: + state: "directory" + path: "{{ dir_volume_cache }}" + mode: "0700" + +- name: "Create S3-Backed Volume Data Directory" + file: + state: "directory" + 
path: "{{ dir_volume_mount }}" + mode: "0700" + +# Install Volume-Mount Service +- name: "Install rclone.conf" + template: + src: "{{ role_path }}/templates/rclone.conf" + dest: "{{ file_rclone_config }}" + owner: "root" + group: "root" + mode: "0600" + +- name: "Install rclone-{{ volume_name }}.service" + template: + src: "{{ role_path }}/templates/rclone.service" + dest: "/etc/systemd/system/rclone-{{ volume_name }}.service" + owner: "root" + group: "root" + mode: "0600" + +- name: "Start rclone-{{ volume_name }}.service" + systemd: + state: "started" + enabled: true + name: "rclone-{{ volume_name }}" + daemon_reload: "yes" diff --git a/roles/stack/deploy_volume_s3/templates/rclone.conf b/roles/stack/deploy_volume_s3/templates/rclone.conf new file mode 100644 index 0000000..da6923c --- /dev/null +++ b/roles/stack/deploy_volume_s3/templates/rclone.conf @@ -0,0 +1,15 @@ +[{{volume_name}}-insecure] +type = s3 +provider = Other +env_auth = false +access_key_id = {{ s3_access_key_id }} +secret_access_key = {{ s3_secret_access_key }} +region = auto +endpoint = {{ s3_endpoint }} +acl = {{ s3_acl }} + +[{{volume_name}}] +type = crypt +remote = {{ volume_name }}-insecure:{{ s3_bucket_name }} +password = {{ rclone_enckey_1 }} +password2 = {{ rclone_enckey_2 }} diff --git a/roles/stack/deploy_volume_s3/templates/rclone.service b/roles/stack/deploy_volume_s3/templates/rclone.service new file mode 100644 index 0000000..029daca --- /dev/null +++ b/roles/stack/deploy_volume_s3/templates/rclone.service @@ -0,0 +1,16 @@ +[Unit] +Description=rclone_s3 - {{ volume_name }} +AssertPathIsDirectory={{ dir_volume_mount }} +After=network.target + +[Service] +Type=simple + +ExecStart=/usr/bin/rclone mount {{ rclone_mount_opts }} {{ volume_name }}: {{ dir_volume_mount }} +ExecStop=/usr/bin/fusermount -zu {{ dir_volume_mount }} + +Restart=on-failure +RestartSec=10 + +[Install] +WantedBy=default.target diff --git a/run.sh b/run.sh index 8f6fa24..236a845 100755 --- a/run.sh +++ b/run.sh @@ 
-7,17 +7,8 @@ SCRIPT_PATH="$(dirname "$(readlink -f "$0")")" #################### # - Constants #################### -PLAYBOOKS_PATH="$SCRIPT_PATH/playbooks" - INVENTORY="$SCRIPT_PATH/inventory.yml" - -PLAYBOOK_HOSTS="$PLAYBOOKS_PATH/playbook.hosts.yml" -PLAYBOOK_WG0="$PLAYBOOKS_PATH/playbook.wg0.yml" -PLAYBOOK_SWARM="$PLAYBOOKS_PATH/playbook.swarm.yml" - -PLAYBOOK_STACK_CLEANUP="$SCRIPT_PATH/stacks/cleanup/playbook.yml" -PLAYBOOK_STACK_MESH="$SCRIPT_PATH/stacks/mesh/playbook.yml" -PLAYBOOK_STACK_SITE_SUPPORT="$SCRIPT_PATH/stacks/site-support/playbook.yml" +PLAYBOOK="$SCRIPT_PATH/playbook.yml" help() { less -R << EOF @@ -25,6 +16,10 @@ This script manages the deployment using ansible. Usage: ./run.sh [COMMAND] + +Commands: + sync [TAGS] + - Specify comma-separated TAGS to restrict execution to particular stages/stacks. EOF } @@ -68,47 +63,32 @@ case $(cat /etc/debian_version | cut -d . -f 1) in ;; esac -if [[ $(cmd_exists ansible) != true ]]; then - echo "This script requires ansible. Press ENTER to install and continue..." - sudo apt install ansible +if [ ! -d "$SCRIPT_PATH/.venv" ]; then + python3 -m venv .venv +fi + +. .venv/bin/activate + +if [[ $(cmd_exists ansible) != true ]]; then + pip install -r "$SCRIPT_PATH/requirements.txt" - echo "This script requires latest community.docker module. Press ENTER to install and continue..."
ansible-galaxy collection install community.docker + ansible-galaxy collection install community.digitalocean fi #################### # - Actions #################### -action_hosts() { +action_sync() { ansible-playbook \ --inventory "$INVENTORY" \ - "$PLAYBOOK_HOSTS" + "$PLAYBOOK" } -action_wg0() { +action_sync_tags() { ansible-playbook \ --inventory "$INVENTORY" \ - "$PLAYBOOK_WG0" -} -action_swarm() { - ansible-playbook \ - --inventory "$INVENTORY" \ - "$PLAYBOOK_SWARM" -} - -action_stack_cleanup() { - ansible-playbook \ - --inventory "$INVENTORY" \ - "$PLAYBOOK_STACK_CLEANUP" -} -action_stack_mesh() { - ansible-playbook \ - --inventory "$INVENTORY" \ - "$PLAYBOOK_STACK_MESH" -} -action_stack_site_support() { - ansible-playbook \ - --inventory "$INVENTORY" \ - "$PLAYBOOK_STACK_SITE_SUPPORT" + "$PLAYBOOK" \ + --tags "$1" } #################### @@ -116,45 +96,10 @@ action_stack_site_support() { #################### case $1 in sync) - action_hosts - action_wg0 - action_swarm - - action_stack_cleanup - action_stack_mesh - action_stack_site_support + if [ -z "${2-}" ]; then + action_sync + else + action_sync_tags "$2" + fi ;; - - sync-hosts) - action_hosts - ;; - sync-wg0) - action_wg0 - ;; - sync-swarm) - action_swarm - ;; - - sync-stacks) - action_stack_cleanup - action_stack_mesh - action_stack_site_support - ;; - sync-stack-cleanup) - action_stack_cleanup - ;; - sync-stack-mesh) - action_stack_mesh - ;; - sync-stack-site-support) - action_stack_site_support - ;; - -# sync-role) -# ansible-playbook \ -# --inventory "$INVENTORY" \ -# --tags "$2" \ -# "$PLAYBOOK" -# ;; - esac diff --git a/stacks/.gitignore b/stacks/.gitignore deleted file mode 100644 index 80a9a45..0000000 --- a/stacks/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -auth -chat -git -s3 -updater -uptime diff --git a/stacks/cleanup/playbook.yml b/stacks/cleanup/playbook.yml index 44b22b3..c99f40d 100644 --- a/stacks/cleanup/playbook.yml +++ b/stacks/cleanup/playbook.yml @@ -1,28 +1,13 @@ -- hosts: leader - 
become: "true" +#################### +# - Deploy Stack: cleanup +#################### +- name: "Deploy Stack: cleanup" + hosts: "swarm_leader" + tags: + - "stage_stack" + - "stage_stack_cleanup" vars: stack_name: "cleanup" - tasks: - #################### - # - Stack Deployment - #################### - - name: "Upload Stack to /tmp" - template: - src: "./docker-compose.yml" - dest: "/tmp/{{ stack_name }}.yml" - owner: "root" - group: "root" - mode: "0640" - - - name: "Deploy Stack: {{ stack_name }}" - community.docker.docker_stack: - state: "present" - prune: "true" - name: "{{ stack_name }}" - compose: - - "/tmp/{{ stack_name }}.yml" - - - name: "Delete /tmp Stack" - ansible.builtin.file: - path: "/tmp/{{ stack_name }}.yml" - state: "absent" + + roles: + - role: "stack/deploy_stack" diff --git a/stacks/mesh/configs/mesh__site-support__service.toml b/stacks/mesh/configs/mesh__site-support__service.toml new file mode 100644 index 0000000..804e46d --- /dev/null +++ b/stacks/mesh/configs/mesh__site-support__service.toml @@ -0,0 +1,10 @@ +[http.routers.site-support__site-support] +rule = "Host(`pysupport.timesigned.com`)" +entryPoints = ["websecure", "web"] +service = "site-support__site-support" + +[[http.services.site-support__site-support.loadBalancer.servers]] +url = "http://site-support.site-support:8787" + +[http.routers.site-support__site-support.tls] +certResolver = "letsencrypt" diff --git a/stacks/mesh/configs/mesh__stack_chat.toml b/stacks/mesh/configs/mesh__stack_chat.toml deleted file mode 100644 index e69de29..0000000 diff --git a/stacks/mesh/configs/mesh__stack_git.toml b/stacks/mesh/configs/mesh__stack_git.toml deleted file mode 100644 index e69de29..0000000 diff --git a/stacks/mesh/configs/mesh__traefik_static.toml b/stacks/mesh/configs/mesh__traefik_static.toml index 46f1713..05e5a00 100644 --- a/stacks/mesh/configs/mesh__traefik_static.toml +++ b/stacks/mesh/configs/mesh__traefik_static.toml @@ -5,17 +5,15 @@ checkNewVersion = false sendAnonymousUsage 
= false -[experimental] -http3 = true - [api] dashboard = false insecure = false debug = false -disabledashboardad = true [log] -level = "DEBUG" +level = "INFO" + +[accessLog] @@ -26,7 +24,7 @@ level = "DEBUG" [certificatesResolvers.letsencrypt.acme] email = "{{ email_letsencrypt }}" storage = "/data-certs/acme.json" -#caServer = "https://acme-staging-v02.api.letsencrypt.org/directory" +caServer = "https://acme-staging-v02.api.letsencrypt.org/directory" [certificatesResolvers.letsencrypt.acme.tlsChallenge] @@ -41,7 +39,6 @@ storage = "/data-certs/acme.json" [entryPoints.websecure] address = ":443" -http3.advertisedPort = 443 [entryPoints.web] @@ -60,4 +57,3 @@ permanent = true [providers.file] directory = "/data-providers" watch = false -debugLogGeneratedTemplate = true diff --git a/stacks/mesh/docker-compose.yml b/stacks/mesh/docker-compose.yml index c1952ee..9d32853 100644 --- a/stacks/mesh/docker-compose.yml +++ b/stacks/mesh/docker-compose.yml @@ -21,7 +21,7 @@ services: uid: "5000" gid: "5000" - - source: mesh__stack_site-support.toml + - source: mesh__site-support__service.toml target: /data-providers/site-support.toml uid: "5000" gid: "5000" @@ -30,7 +30,7 @@ services: - /etc/localtime:/etc/localtime:ro - /etc/timezone:/etc/timezone:ro - - mesh__traefik_certs:/data-certs + - /data/volumes/mesh__traefik_certs/data:/data-certs:shared ports: ## HTTP @@ -65,7 +65,9 @@ services: - node.role == manager networks: - - mesh_public + mesh_public: + aliases: + - "traefik.mesh" #################### # - Resources @@ -77,11 +79,7 @@ configs: external: true mesh__traefik_default_middlewares.toml: external: true - mesh__stack_site-support.toml: - external: true - -volumes: - mesh__traefik_certs: + mesh__site-support__service.toml: external: true networks: diff --git a/stacks/mesh/playbook.yml b/stacks/mesh/playbook.yml index 70b2f1b..1293ede 100644 --- a/stacks/mesh/playbook.yml +++ b/stacks/mesh/playbook.yml @@ -1,120 +1,25 @@ -#################### -# - Stop the Stack 
-#################### -- hosts: leader - become: "true" +#################### +# - Deploy Stack: mesh +#################### +- name: "Deploy Stack: mesh" + hosts: "swarm_leader" + tags: + - "stage_stack" + - "stage_stack_mesh" vars: stack_name: "mesh" - tasks: - - name: "Stop Stack: {{ stack_name }}" - community.docker.docker_stack: - state: "absent" - absent_retries: 15 - name: "{{ stack_name }}" - - - name: "Pause to Let Stack Stop" - pause: - seconds: 5 - - -#################### -# - Volume Creation -#################### -- hosts: swarm - become: "true" - vars: - cloudflare_b0__access_key_id: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/r2/mesh__traefik_certs/access_key_id') }}" - cloudflare_b0__secret_access_key: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/r2/mesh__traefik_certs/secret_access_key') }}" - cloudflare_b0__endpoint: "{{ lookup('community.general.passwordstore', 'work/dtu/python-support/r2/mesh__traefik_certs/endpoint') }}" + email_letsencrypt: "{{ email_abuse }}" - tasks: - - name: "Unmount Volume: mesh__traefik_certs" - community.docker.docker_volume: - state: "absent" - name: "mesh__traefik_certs" - driver: "rclone" - - - name: "Pause to Let Volume Unmount" - pause: - seconds: 5 - - - name: "Mount Volume: mesh__traefik_certs" - community.docker.docker_volume: - state: "present" - name: "mesh__traefik_certs" - driver: "rclone" - driver_options: - remote: ":s3:mesh--traefik-certs" - uid: "5000" - gid: "5000" - s3_provider: "Cloudflare" - s3_access_key_id: "{{ cloudflare_b0__access_key_id }}" - s3_secret_access_key: "{{ cloudflare_b0__secret_access_key }}" - s3_region: "auto" - s3_endpoint: "{{ cloudflare_b0__endpoint }}" - s3_acl: "private" - vfs_cache_mode: "full" - -#################### -# - Deployment -#################### -- hosts: leader - become: "true" + roles: +- role: "stack/deploy_network_overlay" vars: - email_letsencrypt: "s174509@dtu.dk" + network_name: "mesh_public" - stack_name: 
"mesh" - stack_configs: - - "mesh__traefik_static.toml" - - "mesh__traefik_tls.toml" - - "mesh__traefik_default_middlewares.toml" - - "mesh__stack_site-support.toml" - - tasks: - #################### - # - Network Creation - #################### - - name: "Create Network: mesh_public" - community.docker.docker_network: - state: "present" - name: "mesh_public" - driver: "overlay" - scope: "swarm" - attachable: true - appends: true - - - #################### - # - Configs Creation - #################### - - name: "Create Docker Configs" - community.docker.docker_config: - state: "present" - name: "{{ item }}" - data: "{{ lookup('template', './configs/' ~ item) | b64encode }}" - data_is_b64: "true" - with_items: "{{ stack_configs }}" - - #################### - # - Stack Deployment - #################### - - name: "Upload Stack to /tmp" - template: - src: "./docker-compose.yml" - dest: "/tmp/{{ stack_name }}.yml" - owner: "root" - group: "root" - mode: "0640" - - - name: "Deploy Stack: {{ stack_name }}" - community.docker.docker_stack: - state: "present" - prune: "true" - name: "{{ stack_name }}" - compose: - - "/tmp/{{ stack_name }}.yml" - - - name: "Delete /tmp Stack" - ansible.builtin.file: - path: "/tmp/{{ stack_name }}.yml" - state: "absent" +- role: "stack/deploy_volume_s3" + vars: + volume_name: "mesh__traefik_certs" + perms_uid: "5000" + perms_gid: "5000" + +- role: "stack/deploy_configs" + - role: "stack/deploy_stack" diff --git a/stacks/site-support/SECURITY.md b/stacks/site-support/SECURITY.md index 626eb5c..8f72b11 100644 --- a/stacks/site-support/SECURITY.md +++ b/stacks/site-support/SECURITY.md @@ -25,9 +25,7 @@ The service employs CPU/Memory usage limits in the `deploy` section. This helps prevent a DDoS attack from crashing the entire host. ## Capabilities -All capabilities are dropped with `--cap_drop ALL`. - -No capabilities need to be added back, so none are. +The container runs with default capabilities. 
## security.txt *See https://securitytxt.org/ for RFC + generator.* diff --git a/stacks/mesh/configs/mesh__stack_site-support.toml b/stacks/site-support/configs_mesh/mesh__site-support__service.toml similarity index 90% rename from stacks/mesh/configs/mesh__stack_site-support.toml rename to stacks/site-support/configs_mesh/mesh__site-support__service.toml index 58bf3c0..f3a013c 100644 --- a/stacks/mesh/configs/mesh__stack_site-support.toml +++ b/stacks/site-support/configs_mesh/mesh__site-support__service.toml @@ -4,7 +4,7 @@ entryPoints = ["websecure", "web"] service = "site-support__site-support" [[http.services.site-support__site-support.loadBalancer.servers]] -url = "http://site-support:8787" +url = "http://10.99.88.3:8787" [http.routers.site-support__site-support.tls] certResolver = "letsencrypt" diff --git a/stacks/site-support/docker-compose.yml b/stacks/site-support/docker-compose.yml index a089e7e..c5b20af 100644 --- a/stacks/site-support/docker-compose.yml +++ b/stacks/site-support/docker-compose.yml @@ -4,8 +4,6 @@ services: site-support: image: git.sofus.io/python-support/site-support:0 user: "5020:5020" - cap_drop: - - ALL volumes: - /etc/localtime:/etc/localtime:ro @@ -13,34 +11,31 @@ services: configs: - source: site-support__security.txt - target: /public/.well-known/security.txt - uid: "5020" - gid: "5020" + target: /app/.well-known/security.txt environment: SERVER_PORT: "8787" + SERVER_ROOT: "/app" SERVER_REDIRECT_TRAILING_SLASH: "true" - SERVER_LOG_LEVEL: "info" + SERVER_LOG_LEVEL: "trace" SERVER_LOG_REMOTE_ADDRESS: "false" - - SERVER_THREADS_MULTIPLIER: "0" ## Use # CPUs - SERVER_SECURITY_HEADERS: "true" + SERVER_SECURITY_HEADERS: "false" SERVER_DIRECTORY_LISTING: "false" SERVER_CACHE_CONTROL_HEADERS: "false" ## change when stable? - SERVER_COMPRESSION: "true" ## reconsider for small ssg payload + SERVER_COMPRESSION: "false" ## reconsider for small ssg payload SERVER_COMPRESSION_STATIC: "false" ## pre-compress? 
:) deploy: mode: replicated replicas: 1 -# resources: -# limits: -# cpus: "4.0" -# memory: "4G" + resources: + limits: + cpus: "1.0" + memory: "750M" restart_policy: condition: on-failure @@ -49,12 +44,13 @@ services: window: 120s networks: - - mesh_public + public: + ipv4_address: "10.99.88.3" configs: site-support__security.txt: external: true networks: - mesh_public: + public: external: true diff --git a/stacks/site-support/playbook.yml b/stacks/site-support/playbook.yml index e0f7da6..6ce27f1 100644 --- a/stacks/site-support/playbook.yml +++ b/stacks/site-support/playbook.yml @@ -1,66 +1,33 @@ -#################### -# - Deployment -#################### -- hosts: leader - become: "true" +#################### +# - Deploy Stack: site-support +#################### +- name: "Deploy Stack: site-support" + hosts: "swarm_leader" + tags: + - "stage_stack" + - "stage_stack_site-support" vars: stack_name: "site-support" - stack_configs: - - "site-support__security.txt" - tasks: - #################### - # - Stop the Stack - #################### - - name: "Stop Stack: {{ stack_name }}" - community.docker.docker_stack: - state: "absent" - absent_retries: 15 - name: "{{ stack_name }}" + roles: + - role: "stack/deploy_dns" + vars: + domain: "pysupport.{{ dns_root }}" + domain_to: "{{ dns_root }}" - #################### - # - Network Creation - #################### - - name: "Create Network: mesh_public" - community.docker.docker_network: - state: "present" - name: "mesh_public" - driver: "overlay" - scope: "swarm" - attachable: true - appends: true + - role: "stack/deploy_network_overlay" + vars: + network_name: "public" - #################### - # - Config Creation - #################### - - name: "Create Docker Configs" - community.docker.docker_config: - state: "present" - name: "{{ item }}" - data: "{{ lookup('template', './configs/' ~ item) | b64encode }}" - data_is_b64: "true" - with_items: "{{ stack_configs }}" + - role: "stack/deploy_configs" + vars: + 
stack_configs_gen: + site-support__security.txt: "securitytxt" + + # `securitytxt` Generation Variables + securitytxt__mailto: "s174509@dtu.dk" + securitytxt__expiry: "" + securitytxt__gpg_id: "E3B345EFFF5B3994BC1D12603D01BE95F3EFFEB9" + securitytxt__domain: "https://timesigned.com" - #################### - # - Stack Deployment - #################### - - name: "Upload Stack to /tmp" - template: - src: "./docker-compose.yml" - dest: "/tmp/{{ stack_name }}.yml" - owner: "root" - group: "root" - mode: "0640" - - - name: "Deploy Stack: {{ stack_name }}" - community.docker.docker_stack: - state: "present" - prune: "true" - name: "{{ stack_name }}" - compose: - - "/tmp/{{ stack_name }}.yml" - - - name: "Delete /tmp Stack" - ansible.builtin.file: - path: "/tmp/{{ stack_name }}.yml" - state: "absent" + - role: "stack/deploy_stack"