feat: Add Nextcloud maintenance automation and cleanup
- Add 260124-nextcloud-maintenance.yml playbook for database indices and mimetypes
- Add run-maintenance-all-servers.sh script to run maintenance on all servers
- Update ansible.cfg with IdentitiesOnly SSH option to prevent auth failures
- Remove orphaned SSH keys for deleted servers (black, dev, purple, white, edge)
- Remove obsolete edge-traefik and nat-gateway roles
- Remove old upgrade playbooks and fix-private-network playbook
- Update host_vars for egel, ree, zwaan
- Update diun webhook configuration

Successfully ran maintenance on all 17 active servers:
- Database indices optimized
- Mimetypes updated (145-157 new types on most servers)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
parent 60513601d4
commit 39c57d583a

29 changed files with 396 additions and 1368 deletions
ansible.cfg:

@@ -37,4 +37,4 @@ become_ask_pass = False
 [ssh_connection]
 pipelining = True
-ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes
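
Note on the IdentitiesOnly change: without it, ssh offers every key loaded in the agent before the one passed via --private-key, and with one key per server this can trip the server's MaxAuthTries limit ("Too many authentication failures"). A quick way to observe the behavior (a sketch; the host IP is egel's from the host_vars below, and the key path follows the ../keys/ssh/<server> convention used elsewhere in this commit):

    # Watch which keys are offered; many "Offering public key" lines followed
    # by a disconnect indicates the agent-flooding failure mode
    ssh -v root@168.119.63.75 2>&1 | grep -E 'Offering|Too many'

    # With IdentitiesOnly=yes only the explicitly supplied key is tried
    ssh -o IdentitiesOnly=yes -i ../keys/ssh/egel root@168.119.63.75 true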
host_vars (egel):

@@ -1,9 +1,8 @@
 ---
-# egel server - behind edge proxy (private network only)
+# egel server - direct public IP
 
-# SSH via edge server as bastion/jump host
-ansible_host: 10.0.0.52
-ansible_ssh_common_args: '-o ProxyCommand="ssh -i ../keys/ssh/edge -W %h:%p -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@78.47.191.38" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# SSH directly to public IP
+ansible_host: 168.119.63.75
 
 # Client identification
 client_name: egel
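
With the bastion hop removed, connections go straight to the public IP; a minimal connectivity check matching the new host_vars (the key path is an assumption based on the ../keys/ssh/<server> convention documented in the maintenance playbook):

    # Old route: ProxyCommand through the edge bastion at 78.47.191.38
    # New route: direct to the public IP
    ssh -o IdentitiesOnly=yes -i ../keys/ssh/egel root@168.119.63.75 hostname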
host_vars (ree):

@@ -1,9 +1,8 @@
 ---
-# ree server - behind edge proxy (private network only)
+# ree server - direct public IP
 
-# SSH via edge server as bastion/jump host
-ansible_host: 10.0.0.49
-ansible_ssh_common_args: '-o ProxyCommand="ssh -i ../keys/ssh/edge -W %h:%p -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@78.47.191.38" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# SSH directly to public IP
+ansible_host: 159.69.182.238
 
 # Client identification
 client_name: ree
host_vars (white) (deleted):

@@ -1,11 +0,0 @@
----
-# white server - behind edge proxy (private network only)
-
-# SSH via edge server as bastion/jump host
-ansible_host: 10.0.0.40
-ansible_ssh_common_args: '-o ProxyCommand="ssh -i ../keys/ssh/edge -W %h:%p -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@78.47.191.38" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-
-# Client identification
-client_name: white
-client_domain: white.vrije.cloud
-client_secrets_file: white.sops.yaml
host_vars (zwaan):

@@ -1,9 +1,8 @@
 ---
-# zwaan server - behind edge proxy (private network only)
+# zwaan server - direct public IP
 
-# SSH via edge server as bastion/jump host
-ansible_host: 10.0.0.42
-ansible_ssh_common_args: '-o ProxyCommand="ssh -i ../keys/ssh/edge -W %h:%p -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@78.47.191.38" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+# SSH directly to public IP
+ansible_host: 162.55.43.144
 
 # Client identification
 client_name: zwaan
diun webhook configuration playbook:

@@ -14,6 +14,17 @@
   become: yes
 
   vars:
+    # Diun base configuration (from role defaults)
+    diun_version: "latest"
+    diun_log_level: "info"
+    diun_watch_workers: 10
+    diun_watch_all: true
+    diun_exclude_containers: []
+    diun_first_check_notif: false
+
+    # Schedule: Daily at 6am UTC
+    diun_schedule: "0 6 * * *"
+
     # Webhook configuration - sends to Matrix via custom webhook
     diun_notif_enabled: true
     diun_notif_type: webhook
@@ -25,8 +36,19 @@
     # Disable email notifications
     diun_email_enabled: false
 
-    # Schedule: Weekly on Monday at 6am UTC
-    diun_schedule: "0 6 * * 1"
+    # SMTP defaults (not used when email disabled, but needed for template)
+    diun_smtp_host: "smtp.eu.mailgun.org"
+    diun_smtp_port: 587
+    diun_smtp_from: "{{ client_name }}@mg.vrije.cloud"
+    diun_smtp_to: "pieter@postxsociety.org"
+
+    # Optional notification defaults (unused but needed for template)
+    diun_slack_webhook_url: ""
+    diun_matrix_enabled: false
+    diun_matrix_homeserver_url: ""
+    diun_matrix_user: ""
+    diun_matrix_password: ""
+    diun_matrix_room_id: ""
 
   pre_tasks:
     - name: Gather facts
@@ -56,13 +78,19 @@
       no_log: true
 
   tasks:
+    - name: Set SMTP credentials (required by template even if unused)
+      set_fact:
+        diun_smtp_username_final: "{{ client_secrets.mailgun_smtp_user | default('') }}"
+        diun_smtp_password_final: ""
+      no_log: true
+
     - name: Display configuration summary
       debug:
         msg: |
           Configuring Diun on {{ inventory_hostname }}:
           - Webhook endpoint: {{ diun_webhook_endpoint }}
           - Email notifications: {{ 'enabled' if diun_email_enabled else 'disabled' }}
-          - Schedule: {{ diun_schedule }} (Weekly Monday 6am UTC)
+          - Schedule: {{ diun_schedule }} (Daily at 6am UTC)
 
     - name: Deploy Diun configuration with webhook
       template:
260123-upgrade-nextcloud-stage.yml (deleted):

@@ -1,118 +0,0 @@
----
-# Nextcloud Upgrade Stage Task File
-# This file is included by 260123-upgrade-nextcloud.yml for each upgrade stage
-# Do not run directly
-
-- name: "Stage {{ stage.stage }}: Upgrade from v{{ stage.from }} to v{{ stage.to }}"
-  debug:
-    msg: |
-      ============================================================
-      Starting Stage {{ stage.stage }}: v{{ stage.from }} → v{{ stage.to }}
-      ============================================================
-
-- name: "Stage {{ stage.stage }}: Verify current version is v{{ stage.from }}"
-  shell: docker exec -u www-data nextcloud php occ status --output=json
-  register: stage_version_check
-  changed_when: false
-
-- name: "Stage {{ stage.stage }}: Parse current version"
-  set_fact:
-    stage_current: "{{ (stage_version_check.stdout | from_json).versionstring }}"
-
-- name: "Stage {{ stage.stage }}: Check version compatibility"
-  fail:
-    msg: "Expected v{{ stage.from }}.x but found v{{ stage_current }}"
-  when: stage_current is version(stage.from, '<') or stage_current is version(stage.to, '>=')
-
-- name: "Stage {{ stage.stage }}: Disable non-essential apps"
-  shell: |
-    docker exec -u www-data nextcloud php occ app:list --output=json | \
-    jq -r '.enabled | keys[]' | \
-    grep -Ev '^(files|dav|federatedfilesharing|settings|provisioning_api|files_sharing|files_trashbin|files_versions|comments|contactsinteraction|dashboard|activity|notifications|user_status|weather_status|workflowengine)$' | \
-    while read app; do
-      echo "Disabling $app"
-      docker exec -u www-data nextcloud php occ app:disable "$app" || true
-    done
-  register: apps_disabled
-  changed_when: "'Disabling' in apps_disabled.stdout"
-
-- name: "Stage {{ stage.stage }}: Update docker-compose.yml to v{{ stage.to }}"
-  replace:
-    path: "{{ nextcloud_base_dir }}/docker-compose.yml"
-    regexp: 'image:\s*nextcloud:{{ stage.from }}'
-    replace: 'image: nextcloud:{{ stage.to }}'
-
-- name: "Stage {{ stage.stage }}: Pull Nextcloud v{{ stage.to }} image"
-  shell: docker pull nextcloud:{{ stage.to }}
-  register: image_pull
-  changed_when: "'Downloaded' in image_pull.stdout or 'Pulling' in image_pull.stdout"
-
-- name: "Stage {{ stage.stage }}: Start Nextcloud with new version"
-  community.docker.docker_compose_v2:
-    project_src: "{{ nextcloud_base_dir }}"
-    state: present
-    pull: always
-
-- name: "Stage {{ stage.stage }}: Wait for container to be ready"
-  shell: |
-    timeout=300
-    elapsed=0
-    while [ $elapsed -lt $timeout ]; do
-      if docker exec nextcloud curl -f http://localhost:80/status.php 2>/dev/null; then
-        echo "Container ready"
-        exit 0
-      fi
-      sleep 5
-      elapsed=$((elapsed + 5))
-    done
-    echo "Timeout waiting for container"
-    exit 1
-  register: container_ready
-  changed_when: false
-
-- name: "Stage {{ stage.stage }}: Run occ upgrade"
-  shell: docker exec -u www-data nextcloud php occ upgrade --no-interaction
-  register: occ_upgrade
-  changed_when: "'Update successful' in occ_upgrade.stdout or 'upgraded' in occ_upgrade.stdout"
-  failed_when:
-    - occ_upgrade.rc != 0
-    - "'already latest version' not in occ_upgrade.stdout"
-
-- name: "Stage {{ stage.stage }}: Display upgrade output"
-  debug:
-    msg: "{{ occ_upgrade.stdout_lines }}"
-
-- name: "Stage {{ stage.stage }}: Verify upgrade succeeded"
-  shell: docker exec -u www-data nextcloud php occ status --output=json
-  register: stage_verify
-  changed_when: false
-
-- name: "Stage {{ stage.stage }}: Parse upgraded version"
-  set_fact:
-    stage_upgraded: "{{ (stage_verify.stdout | from_json).versionstring }}"
-
-- name: "Stage {{ stage.stage }}: Check upgrade was successful"
-  fail:
-    msg: "Upgrade to v{{ stage.to }} failed - still on v{{ stage_upgraded }}"
-  when: stage_upgraded is version(stage.to, '<')
-
-- name: "Stage {{ stage.stage }}: Run database migrations"
-  shell: docker exec -u www-data nextcloud php occ db:add-missing-indices
-  register: db_indices
-  changed_when: "'indices added' in db_indices.stdout"
-  failed_when: false
-
-- name: "Stage {{ stage.stage }}: Run database column conversions"
-  shell: docker exec -u www-data nextcloud php occ db:convert-filecache-bigint --no-interaction
-  register: db_bigint
-  changed_when: "'converted' in db_bigint.stdout"
-  failed_when: false
-  timeout: 600
-
-- name: "Stage {{ stage.stage }}: Success"
-  debug:
-    msg: |
-      ============================================================
-      ✓ Stage {{ stage.stage }} completed successfully
-      Upgraded from v{{ stage.from }} to v{{ stage_upgraded }}
-      ============================================================
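
For reference, the removed task file boils down to a short per-stage sequence; a hand-run sketch of a single v30→v31 hop using the same commands (paths as in the playbook vars, e.g. /opt/nextcloud):

    # Manual equivalent of one upgrade stage (sketch; mirrors the removed task file)
    sed -i 's/image: nextcloud:30/image: nextcloud:31/' /opt/nextcloud/docker-compose.yml
    docker compose --project-directory /opt/nextcloud up -d --pull always
    docker exec -u www-data nextcloud php occ upgrade --no-interaction
    docker exec -u www-data nextcloud php occ db:add-missing-indices
    docker exec -u www-data nextcloud php occ db:convert-filecache-bigint --no-interaction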
staged upgrade playbook (v2 variant):

@@ -246,53 +246,61 @@
     # DETERMINE UPGRADE PATH
     # ============================================================
 
-    - name: Get current version for upgrade planning
-      shell: docker exec -u www-data nextcloud php occ status --output=json
-      register: current_version_check
-      changed_when: false
-
-    - name: Parse current version
-      set_fact:
-        current_version: "{{ (current_version_check.stdout | from_json).versionstring }}"
-
-    - name: Determine required upgrade stages
-      set_fact:
-        required_stages: "{{ [] }}"
-
-    - name: Add v30→v31 stage if needed
-      set_fact:
-        required_stages: "{{ required_stages + [{'from': '30', 'to': '31', 'stage': 1}] }}"
-      when: current_version is version('30', '>=') and current_version is version('31', '<')
-
-    - name: Add v31→v32 stage if needed
-      set_fact:
-        required_stages: "{{ required_stages + [{'from': '31', 'to': '32', 'stage': 2}] }}"
-      when: current_version is version('31', '>=') and current_version is version('32', '<')
-
-    - name: Display upgrade stages
-      debug:
-        msg: |
-          Current version: v{{ current_version }}
-          Required stages: {{ required_stages | length }}
-          {% if required_stages | length > 0 %}
-          Will upgrade: {% for stage in required_stages %}v{{ stage.from }}→v{{ stage.to }}{{ ' ' if not loop.last else '' }}{% endfor %}
-          {% else %}
-          No upgrade stages needed
-          {% endif %}
-
-    - name: Skip upgrade if no stages needed
-      meta: end_host
-      when: required_stages | length == 0
-
-    # ============================================================
-    # STAGED UPGRADE LOOP
-    # ============================================================
-
-    - name: Perform staged upgrades
-      include_tasks: "{{ playbook_dir }}/260123-upgrade-nextcloud-stage-v2.yml"
-      loop: "{{ required_stages }}"
-      loop_control:
-        loop_var: stage
+    - name: Initialize stage counter
+      set_fact:
+        stage_number: 0
+
+    # ============================================================
+    # STAGED UPGRADE LOOP - Dynamic version checking
+    # ============================================================
+
+    - name: Stage 1 - Upgrade v30→v31 if needed
+      block:
+        - name: Get current version
+          shell: docker exec -u www-data nextcloud php occ status --output=json
+          register: version_check
+          changed_when: false
+
+        - name: Parse version
+          set_fact:
+            current_version: "{{ (version_check.stdout | from_json).versionstring }}"
+
+        - name: Check if v30→v31 upgrade needed
+          set_fact:
+            needs_v31_upgrade: "{{ current_version is version('30', '>=') and current_version is version('31', '<') }}"
+
+        - name: Perform v30→v31 upgrade
+          include_tasks: "{{ playbook_dir }}/260123-upgrade-nextcloud-stage-v2.yml"
+          vars:
+            stage:
+              from: "30"
+              to: "31"
+              stage: 1
+          when: needs_v31_upgrade
+
+    - name: Stage 2 - Upgrade v31→v32 if needed
+      block:
+        - name: Get current version
+          shell: docker exec -u www-data nextcloud php occ status --output=json
+          register: version_check
+          changed_when: false
+
+        - name: Parse version
+          set_fact:
+            current_version: "{{ (version_check.stdout | from_json).versionstring }}"
+
+        - name: Check if v31→v32 upgrade needed
+          set_fact:
+            needs_v32_upgrade: "{{ current_version is version('31', '>=') and current_version is version('32', '<') }}"
+
+        - name: Perform v31→v32 upgrade
+          include_tasks: "{{ playbook_dir }}/260123-upgrade-nextcloud-stage-v2.yml"
+          vars:
+            stage:
+              from: "31"
+              to: "32"
+              stage: 2
+          when: needs_v32_upgrade
 
     # ============================================================
     # POST-UPGRADE
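
The restructure re-reads the live version between stages instead of planning both stages from a single upfront check, so a failed stage no longer leaves a stale plan. The value the playbook parses can be checked the same way by hand (jq is already used by the stage tasks):

    docker exec -u www-data nextcloud php occ status --output=json | jq -r .versionstring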
260123-upgrade-nextcloud.yml (deleted):

@@ -1,292 +0,0 @@
----
-# Nextcloud Major Version Upgrade Playbook
-# Created: 2026-01-23
-# Purpose: Safely upgrade Nextcloud from v30 to v32 via v31 (staged upgrade)
-#
-# Usage:
-#   ansible-playbook -i hcloud.yml playbooks/260123-upgrade-nextcloud.yml --limit kikker
-#
-# Requirements:
-# - HCLOUD_TOKEN environment variable set
-# - SSH access to target server
-# - Sufficient disk space for backups
-#
-# Notes:
-# - Nextcloud does NOT support skipping major versions
-# - This playbook performs: v30 → v31 → v32
-# - Creates backups before each stage
-# - Automatic rollback on failure
-
-- name: Upgrade Nextcloud from v30 to v32 (staged)
-  hosts: all
-  become: true
-  gather_facts: true
-
-  vars:
-    nextcloud_base_dir: "/opt/nextcloud"
-    backup_dir: "/root/nextcloud-backup-{{ ansible_date_time.iso8601_basic_short }}"
-    upgrade_stages:
-      - { from: "30", to: "31", stage: 1 }
-      - { from: "31", to: "32", stage: 2 }
-
-  tasks:
-    # ============================================================
-    # PRE-UPGRADE CHECKS AND PREPARATION
-    # ============================================================
-
-    - name: Display upgrade plan
-      debug:
-        msg: |
-          ============================================================
-          Nextcloud Upgrade Plan - {{ inventory_hostname }}
-          ============================================================
-
-          Upgrade Path: v30 → v31 → v32
-
-          This playbook will:
-          1. Check current Nextcloud version
-          2. Create full backup of volumes and database
-          3. Disable all apps except core ones
-          4. Upgrade to v31 (Stage 1)
-          5. Verify v31 is working
-          6. Upgrade to v32 (Stage 2)
-          7. Verify v32 is working
-          8. Re-enable apps
-
-          Backup location: {{ backup_dir }}
-
-          Estimated time: 15-25 minutes
-          ============================================================
-
-    - name: Check if Nextcloud is installed
-      shell: docker ps --filter "name=nextcloud" --format "{{ '{{' }}.Names{{ '}}' }}"
-      register: nextcloud_running
-      changed_when: false
-      failed_when: false
-
-    - name: Fail if Nextcloud is not running
-      fail:
-        msg: "Nextcloud container is not running on {{ inventory_hostname }}"
-      when: "'nextcloud' not in nextcloud_running.stdout"
-
-    - name: Get current Nextcloud version
-      shell: docker exec -u www-data nextcloud php occ status --output=json
-      register: nextcloud_status
-      changed_when: false
-
-    - name: Parse Nextcloud status
-      set_fact:
-        nc_status: "{{ nextcloud_status.stdout | from_json }}"
-
-    - name: Display current version
-      debug:
-        msg: |
-          Current version: {{ nc_status.versionstring }}
-          Installed: {{ nc_status.installed }}
-          Maintenance mode: {{ nc_status.maintenance }}
-          Needs DB upgrade: {{ nc_status.needsDbUpgrade }}
-
-    - name: Check if already on target version
-      debug:
-        msg: "Nextcloud is already on v32.x - skipping upgrade"
-      when: nc_status.versionstring is version('32', '>=')
-
-    - name: End play if already upgraded
-      meta: end_host
-      when: nc_status.versionstring is version('32', '>=')
-
-    - name: Verify starting version is v30.x
-      fail:
-        msg: "This playbook only upgrades from v30. Current version: {{ nc_status.versionstring }}"
-      when: nc_status.versionstring is version('30', '<') or nc_status.versionstring is version('31', '>=')
-
-    - name: Check disk space
-      shell: df -h {{ nextcloud_base_dir }} | tail -1 | awk '{print $4}'
-      register: disk_space
-      changed_when: false
-
-    - name: Display available disk space
-      debug:
-        msg: "Available disk space: {{ disk_space.stdout }}"
-
-    - name: Check if maintenance mode is enabled
-      fail:
-        msg: "Nextcloud is in maintenance mode. Please investigate before upgrading."
-      when: nc_status.maintenance | bool
-
-    # ============================================================
-    # BACKUP PHASE
-    # ============================================================
-
-    - name: Create backup directory
-      file:
-        path: "{{ backup_dir }}"
-        state: directory
-        mode: '0700'
-
-    - name: Enable Nextcloud maintenance mode
-      shell: docker exec -u www-data nextcloud php occ maintenance:mode --on
-      register: maintenance_on
-      changed_when: "'Maintenance mode enabled' in maintenance_on.stdout"
-
-    - name: Backup Nextcloud database
-      shell: |
-        docker exec nextcloud-db pg_dump -U nextcloud nextcloud | gzip > {{ backup_dir }}/database.sql.gz
-      args:
-        creates: "{{ backup_dir }}/database.sql.gz"
-
-    - name: Get database backup size
-      stat:
-        path: "{{ backup_dir }}/database.sql.gz"
-      register: db_backup
-
-    - name: Display database backup info
-      debug:
-        msg: "Database backup: {{ (db_backup.stat.size / 1024 / 1024) | round(2) }} MB"
-
-    - name: Stop Nextcloud containers (for volume backup)
-      community.docker.docker_compose_v2:
-        project_src: "{{ nextcloud_base_dir }}"
-        state: stopped
-
-    - name: Backup Nextcloud app volume
-      shell: |
-        tar -czf {{ backup_dir }}/nextcloud-app-volume.tar.gz -C /var/lib/docker/volumes/nextcloud-app/_data .
-      args:
-        creates: "{{ backup_dir }}/nextcloud-app-volume.tar.gz"
-
-    - name: Backup Nextcloud database volume
-      shell: |
-        tar -czf {{ backup_dir }}/nextcloud-db-volume.tar.gz -C /var/lib/docker/volumes/nextcloud-db-data/_data .
-      args:
-        creates: "{{ backup_dir }}/nextcloud-db-volume.tar.gz"
-
-    - name: Copy current docker-compose.yml to backup
-      copy:
-        src: "{{ nextcloud_base_dir }}/docker-compose.yml"
-        dest: "{{ backup_dir }}/docker-compose.yml.backup"
-        remote_src: true
-
-    - name: Display backup summary
-      debug:
-        msg: |
-          ============================================================
-          Backup completed: {{ backup_dir }}
-
-          To restore from backup if needed:
-          1. Stop containers: cd {{ nextcloud_base_dir }} && docker compose down
-          2. Restore app volume: tar -xzf {{ backup_dir }}/nextcloud-app-volume.tar.gz -C /var/lib/docker/volumes/nextcloud-app/_data
-          3. Restore DB volume: tar -xzf {{ backup_dir }}/nextcloud-db-volume.tar.gz -C /var/lib/docker/volumes/nextcloud-db-data/_data
-          4. Restore compose file: cp {{ backup_dir }}/docker-compose.yml.backup {{ nextcloud_base_dir }}/docker-compose.yml
-          5. Start containers: cd {{ nextcloud_base_dir }} && docker compose up -d
-          ============================================================
-
-    - name: Restart Nextcloud containers after backup
-      community.docker.docker_compose_v2:
-        project_src: "{{ nextcloud_base_dir }}"
-        state: present
-
-    - name: Wait for Nextcloud to be ready after backup restore
-      shell: |
-        timeout=120
-        elapsed=0
-        while [ $elapsed -lt $timeout ]; do
-          if docker exec nextcloud curl -f http://localhost:80/status.php 2>/dev/null; then
-            echo "Nextcloud ready"
-            exit 0
-          fi
-          sleep 5
-          elapsed=$((elapsed + 5))
-        done
-        echo "Timeout waiting for Nextcloud"
-        exit 1
-      register: nextcloud_restored
-      changed_when: false
-
-    # ============================================================
-    # STAGED UPGRADE LOOP
-    # ============================================================
-
-    - name: Perform staged upgrades
-      include_tasks: "{{ playbook_dir }}/260123-upgrade-nextcloud-stage.yml"
-      loop: "{{ upgrade_stages }}"
-      loop_control:
-        loop_var: stage
-
-    # ============================================================
-    # POST-UPGRADE VALIDATION
-    # ============================================================
-
-    - name: Get final Nextcloud version
-      shell: docker exec -u www-data nextcloud php occ status --output=json
-      register: final_status
-      changed_when: false
-
-    - name: Parse final status
-      set_fact:
-        final_nc_status: "{{ final_status.stdout | from_json }}"
-
-    - name: Verify upgrade to v32
-      fail:
-        msg: "Upgrade failed - still on v{{ final_nc_status.versionstring }}"
-      when: final_nc_status.versionstring is version('32', '<')
-
-    - name: Run Nextcloud system check
-      shell: docker exec -u www-data nextcloud php occ check
-      register: system_check
-      changed_when: false
-      failed_when: false
-
-    - name: Display system check results
-      debug:
-        msg: "{{ system_check.stdout_lines }}"
-
-    - name: Re-enable user_oidc app
-      shell: docker exec -u www-data nextcloud php occ app:enable user_oidc
-      register: oidc_enable
-      changed_when: "'enabled' in oidc_enable.stdout"
-      failed_when: false
-
-    - name: Re-enable richdocuments (Collabora)
-      shell: docker exec -u www-data nextcloud php occ app:enable richdocuments
-      register: collabora_enable
-      changed_when: "'enabled' in collabora_enable.stdout"
-      failed_when: false
-
-    - name: Disable maintenance mode
-      shell: docker exec -u www-data nextcloud php occ maintenance:mode --off
-      register: maintenance_off
-      changed_when: "'Maintenance mode disabled' in maintenance_off.stdout"
-
-    - name: Update docker-compose.yml to use 'latest' tag
-      lineinfile:
-        path: "{{ nextcloud_base_dir }}/docker-compose.yml"
-        regexp: '^\s*image:\s*nextcloud:32\s*$'
-        line: ' image: nextcloud:latest'
-        state: present
-
-    - name: Display upgrade success message
-      debug:
-        msg: |
-          ============================================================
-          ✓ Nextcloud Upgrade SUCCESSFUL!
-          ============================================================
-
-          Server: {{ inventory_hostname }}
-          Previous version: v30.x
-          New version: v{{ final_nc_status.versionstring }}
-
-          Backup location: {{ backup_dir }}
-
-          Next steps:
-          1. Test login at: https://nextcloud.{{ client_domain }}
-          2. Test OIDC login (Login with Authentik)
-          3. Test file upload/download
-          4. Test Collabora Office integration
-
-          If everything works, you can remove the backup:
-          rm -rf {{ backup_dir }}
-
-          The docker-compose.yml has been updated to use 'latest' tag
-          for future automatic updates.
-          ============================================================
ansible/playbooks/260124-nextcloud-maintenance.yml (new file, 151 lines):

@@ -0,0 +1,151 @@
+---
+# Nextcloud Maintenance Playbook
+# Created: 2026-01-24
+# Purpose: Run database and file maintenance tasks on Nextcloud instances
+#
+# This playbook performs:
+# 1. Add missing database indices (improves query performance)
+# 2. Update mimetypes database (ensures proper file type handling)
+#
+# Usage:
+#   cd ansible/
+#   HCLOUD_TOKEN="..." ansible-playbook -i hcloud.yml \
+#     playbooks/nextcloud-maintenance.yml --limit <server> \
+#     --private-key "../keys/ssh/<server>"
+#
+# To run on all servers:
+#   HCLOUD_TOKEN="..." ansible-playbook -i hcloud.yml \
+#     playbooks/nextcloud-maintenance.yml \
+#     --private-key "../keys/ssh/<server>"
+#
+# Requirements:
+# - HCLOUD_TOKEN environment variable set
+# - SSH access to target server(s)
+# - Nextcloud container must be running
+
+- name: Nextcloud Maintenance Tasks
+  hosts: all
+  become: true
+  gather_facts: true
+
+  vars:
+    nextcloud_container: "nextcloud"
+
+  tasks:
+    # ============================================================
+    # PRE-CHECK
+    # ============================================================
+
+    - name: Display maintenance plan
+      debug:
+        msg: |
+          ============================================================
+          Nextcloud Maintenance - {{ inventory_hostname }}
+          ============================================================
+
+          This playbook will:
+          1. Add missing database indices
+          2. Update mimetypes database
+          3. Display results
+
+          Estimated time: 1-3 minutes per server
+          ============================================================
+
+    - name: Check if Nextcloud container is running
+      shell: docker ps --filter "name=^{{ nextcloud_container }}$" --format "{{ '{{' }}.Names{{ '}}' }}"
+      register: nextcloud_running
+      changed_when: false
+      failed_when: false
+
+    - name: Fail if Nextcloud is not running
+      fail:
+        msg: "Nextcloud container is not running on {{ inventory_hostname }}"
+      when: "'nextcloud' not in nextcloud_running.stdout"
+
+    - name: Get current Nextcloud version
+      shell: docker exec -u www-data {{ nextcloud_container }} php occ --version
+      register: nextcloud_version
+      changed_when: false
+
+    - name: Display Nextcloud version
+      debug:
+        msg: "{{ nextcloud_version.stdout }}"
+
+    # ============================================================
+    # TASK 1: ADD MISSING DATABASE INDICES
+    # ============================================================
+
+    - name: Check for missing database indices
+      shell: docker exec -u www-data {{ nextcloud_container }} php occ db:add-missing-indices
+      register: db_indices_result
+      changed_when: "'updated successfully' in db_indices_result.stdout"
+      failed_when: db_indices_result.rc != 0
+
+    - name: Display database indices results
+      debug:
+        msg: |
+          ============================================================
+          Database Indices Results
+          ============================================================
+          {{ db_indices_result.stdout }}
+          ============================================================
+
+    # ============================================================
+    # TASK 2: UPDATE MIMETYPES DATABASE
+    # ============================================================
+
+    - name: Update mimetypes database
+      shell: docker exec -u www-data {{ nextcloud_container }} php occ maintenance:mimetype:update-db
+      register: mimetype_result
+      changed_when: "'Added' in mimetype_result.stdout"
+      failed_when: mimetype_result.rc != 0
+
+    - name: Parse mimetype results
+      set_fact:
+        mimetypes_added: "{{ mimetype_result.stdout | regex_search('Added (\\d+) new mimetypes', '\\1') | default(['0'], true) | first }}"
+
+    - name: Display mimetype results
+      debug:
+        msg: |
+          ============================================================
+          Mimetype Update Results
+          ============================================================
+          Mimetypes added: {{ mimetypes_added }}
+          {% if mimetypes_added | int > 0 %}
+          ✓ Mimetype database updated successfully
+          {% else %}
+          ✓ All mimetypes already up to date
+          {% endif %}
+          ============================================================
+
+    # ============================================================
+    # SUMMARY
+    # ============================================================
+
+    - name: Display maintenance summary
+      debug:
+        msg: |
+          ============================================================
+          ✓ MAINTENANCE COMPLETED - {{ inventory_hostname }}
+          ============================================================
+
+          Server: {{ inventory_hostname }}
+          Version: {{ nextcloud_version.stdout }}
+
+          Tasks completed:
+          {% if db_indices_result.changed %}
+          ✓ Database indices: Updated
+          {% else %}
+          ✓ Database indices: Already optimized
+          {% endif %}
+          {% if mimetype_result.changed %}
+          ✓ Mimetypes: Added {{ mimetypes_added }} new types
+          {% else %}
+          ✓ Mimetypes: Already up to date
+          {% endif %}
+
+          Next steps:
+          - Check admin interface for any remaining warnings
+          - Warnings may take a few minutes to clear from cache
+
+          ============================================================
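
The run-maintenance-all-servers.sh wrapper added by this commit is not part of this excerpt; a minimal sketch of the loop it presumably implements, built from the invocation documented in the playbook header (the server list and per-server key layout are assumptions):

    #!/usr/bin/env bash
    # Hypothetical sketch of run-maintenance-all-servers.sh; the real script
    # is not shown in this diff. Assumes one SSH key per server in ../keys/ssh/.
    set -euo pipefail
    cd ansible/
    for server in egel ree zwaan; do   # extend to all 17 active servers
      HCLOUD_TOKEN="$HCLOUD_TOKEN" ansible-playbook -i hcloud.yml \
        playbooks/260124-nextcloud-maintenance.yml \
        --limit "$server" \
        --private-key "../keys/ssh/$server"
    done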
fix-private-network playbook (deleted):

@@ -1,48 +0,0 @@
----
-# Playbook to fix private network configuration on servers
-# This fixes the netplan configuration to properly enable DHCP
-# on the private network interface (enp7s0)
-
-- name: Fix private network configuration
-  hosts: all
-  gather_facts: no
-  become: yes
-
-  tasks:
-    - name: Check if server is reachable
-      ansible.builtin.wait_for_connection:
-        timeout: 5
-      register: connection_test
-      ignore_errors: yes
-
-    - name: Create corrected netplan configuration for private network
-      ansible.builtin.copy:
-        dest: /etc/netplan/60-private-network.yaml
-        mode: '0600'
-        content: |
-          network:
-            version: 2
-            ethernets:
-              enp7s0:
-                dhcp4: true
-                dhcp4-overrides:
-                  use-routes: false
-                routes:
-                  - to: default
-                    via: 10.0.0.1
-      when: connection_test is succeeded
-
-    - name: Apply netplan configuration
-      ansible.builtin.command: netplan apply
-      when: connection_test is succeeded
-      register: netplan_result
-
-    - name: Show netplan result
-      ansible.builtin.debug:
-        msg: "Netplan applied successfully on {{ inventory_hostname }}"
-      when: connection_test is succeeded and netplan_result is succeeded
-
-    - name: Wait for network to stabilize
-      ansible.builtin.wait_for_connection:
-        timeout: 10
-      when: connection_test is succeeded
edge server setup playbook (deleted):

@@ -1,20 +0,0 @@
----
-# Setup Edge Server
-# Configures the edge server with Traefik reverse proxy
-
-- name: Setup edge server
-  hosts: edge
-  become: yes
-
-  roles:
-    - role: common
-      tags: [common, setup]
-
-    - role: docker
-      tags: [docker, setup]
-
-    - role: nat-gateway
-      tags: [nat, gateway]
-
-    - role: edge-traefik
-      tags: [traefik, edge]
diun role defaults:

@@ -1,7 +1,7 @@
 ---
 # Diun default configuration
 diun_version: "latest"
-diun_schedule: "0 6 * * 1" # Weekly on Monday at 6am UTC (was daily)
+diun_schedule: "0 6 * * *" # Daily at 6am UTC
 diun_log_level: "info"
 diun_watch_workers: 10
@@ -27,7 +27,7 @@ diun_smtp_to: "pieter@postxsociety.org"
 diun_watch_all: true
 diun_exclude_containers: []
 
-# Reduce notification spam - only send ONE email per server per week
+# Don't send notifications on first check (prevents spam on initial run)
 diun_first_check_notif: false
 
 # Optional: Matrix notification
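
For reference, the schedule change in standard five-field cron notation:

    # fields:        minute hour day-of-month month day-of-week
    # "0 6 * * 1" -> 06:00 UTC, Mondays only (previous weekly schedule)
    # "0 6 * * *" -> 06:00 UTC, every day (new schedule)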
edge-traefik role defaults (deleted):

@@ -1,13 +0,0 @@
----
-# Edge Traefik Default Variables
-# This Traefik instance acts as a reverse proxy for private network clients
-
-traefik_version: "v3.3"
-traefik_network: "web"
-traefik_docker_socket: "/var/run/docker.sock"
-traefik_acme_email: "admin@vrije.cloud"
-traefik_acme_staging: false
-traefik_dashboard_enabled: false
-
-# Backend client servers (populated from inventory)
-backend_clients: []
edge-traefik role handlers (deleted):

@@ -1,7 +0,0 @@
----
-# Edge Traefik Handlers
-
-- name: Restart Traefik
-  community.docker.docker_compose_v2:
-    project_src: /opt/docker/traefik
-    state: restarted
edge-traefik role tasks (deleted):

@@ -1,60 +0,0 @@
----
-# Edge Traefik Installation Tasks
-# Sets up Traefik as edge reverse proxy for private network clients
-
-- name: Ensure Traefik configuration directory exists
-  file:
-    path: /opt/docker/traefik
-    state: directory
-    mode: '0755'
-  tags: [traefik, edge]
-
-- name: Create Let's Encrypt storage directory
-  file:
-    path: /opt/docker/traefik/letsencrypt
-    state: directory
-    mode: '0600'
-  tags: [traefik, edge]
-
-- name: Create Traefik log directory
-  file:
-    path: /var/log/traefik
-    state: directory
-    mode: '0755'
-  tags: [traefik, edge]
-
-- name: Deploy Traefik static configuration
-  template:
-    src: traefik.yml.j2
-    dest: /opt/docker/traefik/traefik.yml
-    mode: '0644'
-  notify: Restart Traefik
-  tags: [traefik, edge, config]
-
-- name: Deploy Traefik dynamic configuration (routing rules)
-  template:
-    src: dynamic.yml.j2
-    dest: /opt/docker/traefik/dynamic.yml
-    mode: '0644'
-  notify: Restart Traefik
-  tags: [traefik, edge, config]
-
-- name: Deploy Traefik Docker Compose file
-  template:
-    src: docker-compose.yml.j2
-    dest: /opt/docker/traefik/docker-compose.yml
-    mode: '0644'
-  tags: [traefik, edge]
-
-- name: Start Traefik container
-  community.docker.docker_compose_v2:
-    project_src: /opt/docker/traefik
-    state: present
-  tags: [traefik, edge]
-
-- name: Wait for Traefik to be ready
-  wait_for:
-    port: 443
-    delay: 5
-    timeout: 60
-  tags: [traefik, edge]
edge-traefik template docker-compose.yml.j2 (deleted):

@@ -1,24 +0,0 @@
-# Edge Traefik Docker Compose
-# Managed by Ansible - do not edit manually
-
-services:
-  traefik:
-    image: traefik:{{ traefik_version }}
-    container_name: traefik
-    restart: unless-stopped
-    security_opt:
-      - no-new-privileges:true
-    ports:
-      - "80:80"
-      - "443:443"
-{% if traefik_dashboard_enabled %}
-      - "8080:8080"
-{% endif %}
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-      - ./traefik.yml:/etc/traefik/traefik.yml:ro
-      - ./dynamic.yml:/etc/traefik/dynamic.yml:ro
-      - ./letsencrypt:/letsencrypt
-      - /var/log/traefik:/var/log/traefik
-    labels:
-      - "traefik.enable=false"
@ -1,559 +0,0 @@
|
||||||
# Edge Traefik Dynamic Configuration
|
|
||||||
# Managed by Ansible - do not edit manually
|
|
||||||
# Routes traffic to backend servers on private network
|
|
||||||
|
|
||||||
http:
|
|
||||||
# Routers for white client
|
|
||||||
routers:
|
|
||||||
white-auth:
|
|
||||||
rule: "Host(`auth.white.vrije.cloud`)"
|
|
||||||
service: white-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
white-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.white.vrije.cloud`)"
|
|
||||||
service: white-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
white-collabora:
|
|
||||||
rule: "Host(`office.white.vrije.cloud`)"
|
|
||||||
service: white-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
valk-auth:
|
|
||||||
rule: "Host(`auth.valk.vrije.cloud`)"
|
|
||||||
service: valk-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
valk-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.valk.vrije.cloud`)"
|
|
||||||
service: valk-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
valk-collabora:
|
|
||||||
rule: "Host(`office.valk.vrije.cloud`)"
|
|
||||||
service: valk-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
zwaan-auth:
|
|
||||||
rule: "Host(`auth.zwaan.vrije.cloud`)"
|
|
||||||
service: zwaan-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
zwaan-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.zwaan.vrije.cloud`)"
|
|
||||||
service: zwaan-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
zwaan-collabora:
|
|
||||||
rule: "Host(`office.zwaan.vrije.cloud`)"
|
|
||||||
service: zwaan-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
specht-auth:
|
|
||||||
rule: "Host(`auth.specht.vrije.cloud`)"
|
|
||||||
service: specht-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
specht-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.specht.vrije.cloud`)"
|
|
||||||
service: specht-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
specht-collabora:
|
|
||||||
rule: "Host(`office.specht.vrije.cloud`)"
|
|
||||||
service: specht-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
das-auth:
|
|
||||||
rule: "Host(`auth.das.vrije.cloud`)"
|
|
||||||
service: das-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
das-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.das.vrije.cloud`)"
|
|
||||||
service: das-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
das-collabora:
|
|
||||||
rule: "Host(`office.das.vrije.cloud`)"
|
|
||||||
service: das-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
uil-auth:
|
|
||||||
rule: "Host(`auth.uil.vrije.cloud`)"
|
|
||||||
service: uil-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
uil-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.uil.vrije.cloud`)"
|
|
||||||
service: uil-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
uil-collabora:
|
|
||||||
rule: "Host(`office.uil.vrije.cloud`)"
|
|
||||||
service: uil-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
vos-auth:
|
|
||||||
rule: "Host(`auth.vos.vrije.cloud`)"
|
|
||||||
service: vos-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
vos-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.vos.vrije.cloud`)"
|
|
||||||
service: vos-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
vos-collabora:
|
|
||||||
rule: "Host(`office.vos.vrije.cloud`)"
|
|
||||||
service: vos-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
haas-auth:
|
|
||||||
rule: "Host(`auth.haas.vrije.cloud`)"
|
|
||||||
service: haas-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
haas-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.haas.vrije.cloud`)"
|
|
||||||
service: haas-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
haas-collabora:
|
|
||||||
rule: "Host(`office.haas.vrije.cloud`)"
|
|
||||||
service: haas-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
wolf-auth:
|
|
||||||
rule: "Host(`auth.wolf.vrije.cloud`)"
|
|
||||||
service: wolf-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
wolf-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.wolf.vrije.cloud`)"
|
|
||||||
service: wolf-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
wolf-collabora:
|
|
||||||
rule: "Host(`office.wolf.vrije.cloud`)"
|
|
||||||
service: wolf-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
ree-auth:
|
|
||||||
rule: "Host(`auth.ree.vrije.cloud`)"
|
|
||||||
service: ree-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
ree-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.ree.vrije.cloud`)"
|
|
||||||
service: ree-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
ree-collabora:
|
|
||||||
rule: "Host(`office.ree.vrije.cloud`)"
|
|
||||||
service: ree-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mees-auth:
|
|
||||||
rule: "Host(`auth.mees.vrije.cloud`)"
|
|
||||||
service: mees-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mees-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.mees.vrije.cloud`)"
|
|
||||||
service: mees-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mees-collabora:
|
|
||||||
rule: "Host(`office.mees.vrije.cloud`)"
|
|
||||||
service: mees-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mus-auth:
|
|
||||||
rule: "Host(`auth.mus.vrije.cloud`)"
|
|
||||||
service: mus-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mus-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.mus.vrije.cloud`)"
|
|
||||||
service: mus-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mus-collabora:
|
|
||||||
rule: "Host(`office.mus.vrije.cloud`)"
|
|
||||||
service: mus-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mol-auth:
|
|
||||||
rule: "Host(`auth.mol.vrije.cloud`)"
|
|
||||||
service: mol-auth
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mol-nextcloud:
|
|
||||||
rule: "Host(`nextcloud.mol.vrije.cloud`)"
|
|
||||||
service: mol-nextcloud
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
mol-collabora:
|
|
||||||
rule: "Host(`office.mol.vrije.cloud`)"
|
|
||||||
service: mol-collabora
|
|
||||||
entryPoints:
|
|
||||||
- websecure
|
|
||||||
tls:
|
|
||||||
certResolver: letsencrypt
|
|
||||||
|
|
||||||
# Services (backend servers)
|
|
||||||
services:
|
|
||||||
white-auth:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.40:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
white-nextcloud:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.40:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
white-collabora:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.40:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
valk-auth:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.41:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
valk-nextcloud:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.41:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
valk-collabora:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.41:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
zwaan-auth:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.42:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
zwaan-nextcloud:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.42:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
zwaan-collabora:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.42:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
specht-auth:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.43:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
specht-nextcloud:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.43:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
specht-collabora:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
|
|
||||||
- url: "https://10.0.0.43:443"
|
|
||||||
serversTransport: insecureTransport
|
|
||||||
|
|
||||||
das-auth:
|
|
||||||
loadBalancer:
|
|
||||||
servers:
          - url: "https://10.0.0.44:443"
        serversTransport: insecureTransport

    das-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.44:443"
        serversTransport: insecureTransport

    das-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.44:443"
        serversTransport: insecureTransport

    uil-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.45:443"
        serversTransport: insecureTransport

    uil-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.45:443"
        serversTransport: insecureTransport

    uil-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.45:443"
        serversTransport: insecureTransport

    vos-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.46:443"
        serversTransport: insecureTransport

    vos-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.46:443"
        serversTransport: insecureTransport

    vos-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.46:443"
        serversTransport: insecureTransport

    haas-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.47:443"
        serversTransport: insecureTransport

    haas-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.47:443"
        serversTransport: insecureTransport

    haas-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.47:443"
        serversTransport: insecureTransport

    wolf-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.48:443"
        serversTransport: insecureTransport

    wolf-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.48:443"
        serversTransport: insecureTransport

    wolf-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.48:443"
        serversTransport: insecureTransport

    ree-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.49:443"
        serversTransport: insecureTransport

    ree-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.49:443"
        serversTransport: insecureTransport

    ree-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.49:443"
        serversTransport: insecureTransport

    mees-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.50:443"
        serversTransport: insecureTransport

    mees-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.50:443"
        serversTransport: insecureTransport

    mees-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.50:443"
        serversTransport: insecureTransport

    mus-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.51:443"
        serversTransport: insecureTransport

    mus-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.51:443"
        serversTransport: insecureTransport

    mus-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.51:443"
        serversTransport: insecureTransport

    mol-auth:
      loadBalancer:
        servers:
          - url: "https://10.0.0.53:443"
        serversTransport: insecureTransport

    mol-nextcloud:
      loadBalancer:
        servers:
          - url: "https://10.0.0.53:443"
        serversTransport: insecureTransport

    mol-collabora:
      loadBalancer:
        servers:
          - url: "https://10.0.0.53:443"
        serversTransport: insecureTransport

  # Server transport (allow self-signed certs from backends)
  serversTransports:
    insecureTransport:
      insecureSkipVerify: true
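Every service above forwards to a single backend over HTTPS, and the shared insecureTransport defined at the end skips certificate verification because the backends present self-signed certificates. A minimal sketch of reproducing that check by hand, assuming you run it on the edge host where the 10.0.0.x addresses are reachable:

  # What the proxy effectively does: TLS to the backend with verification disabled (-k)
  curl -sk -o /dev/null -w '%{http_code}\n' https://10.0.0.44:443
  # Without -k, the self-signed certificate should make the same request fail
  curl -s -o /dev/null https://10.0.0.44:443 || echo "TLS verification failed (expected)"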
@ -1,47 +0,0 @@
# Edge Traefik Static Configuration
# Managed by Ansible - do not edit manually
# This configuration proxies to backend servers on private network

api:
  dashboard: {{ traefik_dashboard_enabled | lower }}
{% if traefik_dashboard_enabled %}
  insecure: false
{% endif %}

entryPoints:
  web:
    address: ":80"
    http:
      redirections:
        entryPoint:
          to: websecure
          scheme: https

  websecure:
    address: ":443"
    http:
      tls:
        certResolver: letsencrypt

providers:
  # File provider for static backend configurations
  file:
    filename: /etc/traefik/dynamic.yml
    watch: true

certificatesResolvers:
  letsencrypt:
    acme:
      email: {{ traefik_acme_email }}
      storage: /letsencrypt/acme.json
{% if traefik_acme_staging %}
      caServer: https://acme-staging-v02.api.letsencrypt.org/directory
{% endif %}
      httpChallenge:
        entryPoint: web

log:
  level: INFO

accessLog:
  filePath: /var/log/traefik/access.log
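In this removed static config, port 80 existed only for the ACME http-01 challenge and the forced redirect to HTTPS. A hedged way to confirm that redirect from outside while the proxy was live (the hostname here is hypothetical; substitute a domain the edge actually served):

  # Expect a 3xx status and a Location: https://... header
  curl -sI http://das.vrije.cloud | head -n 5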
@ -1,6 +0,0 @@
---
# NAT Gateway Handlers

- name: Save iptables rules
  shell: |
    iptables-save > /etc/iptables/rules.v4
@ -1,66 +0,0 @@
---
# NAT Gateway Configuration
# Enables internet access for private network clients via edge server

- name: Enable IP forwarding
  sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    state: present
    reload: yes
  tags: [nat, gateway]

- name: Install iptables-persistent
  apt:
    name: iptables-persistent
    state: present
    update_cache: yes
  tags: [nat, gateway]

- name: Configure NAT (masquerading) for private network
  iptables:
    table: nat
    chain: POSTROUTING
    out_interface: eth0
    source: 10.0.0.0/16
    jump: MASQUERADE
    comment: NAT for private network clients
  notify: Save iptables rules
  tags: [nat, gateway]

- name: Allow forwarding from private network (in DOCKER-USER chain)
  iptables:
    chain: DOCKER-USER
    in_interface: enp7s0
    out_interface: eth0
    source: 10.0.0.0/16
    jump: ACCEPT
    comment: Allow forwarding from private network
  notify: Save iptables rules
  tags: [nat, gateway]

- name: Allow established connections back to private network (in DOCKER-USER chain)
  iptables:
    chain: DOCKER-USER
    in_interface: eth0
    out_interface: enp7s0
    ctstate: ESTABLISHED,RELATED
    jump: ACCEPT
    comment: Allow established connections to private network
  notify: Save iptables rules
  tags: [nat, gateway]

- name: Return from DOCKER-USER chain for other traffic
  iptables:
    chain: DOCKER-USER
    jump: RETURN
    comment: Let Docker handle other traffic
  notify: Save iptables rules
  tags: [nat, gateway]

- name: Save iptables rules
  shell: |
    iptables-save > /etc/iptables/rules.v4
  args:
    creates: /etc/iptables/rules.v4
  tags: [nat, gateway]
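Expressed as raw commands, the removed role boiled down to the following (a sketch: rule order matters, and the eth0/enp7s0 interface names are the role's assumptions, not a given on other hosts):

  # Enable forwarding and masquerade private-network traffic out the public interface
  sysctl -w net.ipv4.ip_forward=1
  iptables -t nat -A POSTROUTING -s 10.0.0.0/16 -o eth0 -j MASQUERADE
  # DOCKER-USER rules, in the same order the tasks appended them
  iptables -A DOCKER-USER -i enp7s0 -o eth0 -s 10.0.0.0/16 -j ACCEPT
  iptables -A DOCKER-USER -i eth0 -o enp7s0 -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
  iptables -A DOCKER-USER -j RETURN
  # Verify the masquerade rule is in place
  iptables -t nat -L POSTROUTING -n -v | grep MASQUERADE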
@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJU6ntTc5bYP4FslcLXjm9C+RsO+hygmlsIo8tGOC1Up client-black-deploy-key

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFvJSvafujjq5eojqH/A66mDLLr7/G9o202QCma0SmPt client-dev-deploy-key

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICpzsMHVbAZMugslwn2mJnxg30zYrfU3t+zsZ7Lw3DDD edge-server-deploy-key

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHuR1BR4JaATFwOmLauvvfKjhHarPz1SfnJ+j0caqISr client-purple-deploy-key

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL+BKRVBWUnS2NSPLvP3nxW7oxcv5wfu2DAY1YP0M+6m client-white-deploy-key
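Each of the five deletions above is a one-line public key for a decommissioned server. For reference, a key pair in this naming scheme would typically be generated like so (a sketch; the path and comment follow the repo's apparent convention, with NAME as a placeholder):

  # Creates keys/ssh/NAME (private) and keys/ssh/NAME.pub (public), no passphrase
  ssh-keygen -t ed25519 -N '' -C 'client-NAME-deploy-key' -f keys/ssh/NAME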
@ -222,11 +222,9 @@ if [ ! -f "$HOST_VARS_FILE" ]; then

cat > "$HOST_VARS_FILE" << EOF
---
# ${CLIENT_NAME} server - behind edge proxy (private network only)
# ${CLIENT_NAME} server configuration

# SSH via edge server as bastion/jump host
ansible_host: ${PRIVATE_IP}
ansible_ssh_common_args: '-o ProxyCommand="ssh -i ../keys/ssh/edge -W %h:%p -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@78.47.191.38" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'

# Client identification
client_name: ${CLIENT_NAME}
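With the bastion block gone, the scaffold now emits a plain ansible_host entry. The difference at the SSH level, sketched for the ree server (the public hostname below is hypothetical):

  # Before: hop through the edge bastion to reach the private-only address
  ssh -o ProxyCommand='ssh -i keys/ssh/edge -W %h:%p root@78.47.191.38' root@10.0.0.49
  # After: connect directly, pinning the key so the agent cannot offer the wrong one
  ssh -i keys/ssh/ree -o IdentitiesOnly=yes root@ree.example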
151 scripts/run-maintenance-all-servers.sh Executable file

@ -0,0 +1,151 @@
#!/usr/bin/env bash
#
# Run Nextcloud maintenance playbook on all servers
# Created: 2026-01-24
#
# This script runs the nextcloud maintenance playbook on each server
# with its corresponding SSH key.
#
# Usage:
#   cd infrastructure/
#   HCLOUD_TOKEN="..." ./scripts/run-maintenance-all-servers.sh
#
# Or with SOPS_AGE_KEY_FILE if needed:
#   SOPS_AGE_KEY_FILE="keys/age-key.txt" HCLOUD_TOKEN="..." ./scripts/run-maintenance-all-servers.sh

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
ANSIBLE_DIR="$PROJECT_ROOT/ansible"
KEYS_DIR="$PROJECT_ROOT/keys/ssh"
PLAYBOOK="playbooks/260124-nextcloud-maintenance.yml"

# Check required environment variables
if [ -z "${HCLOUD_TOKEN:-}" ]; then
    echo -e "${RED}Error: HCLOUD_TOKEN environment variable is required${NC}"
    exit 1
fi

# Change to ansible directory
cd "$ANSIBLE_DIR"

echo -e "${BLUE}============================================================${NC}"
echo -e "${BLUE}Nextcloud Maintenance - All Servers${NC}"
echo -e "${BLUE}============================================================${NC}"
echo ""
echo "Playbook: $PLAYBOOK"
echo "Ansible directory: $ANSIBLE_DIR"
echo ""

# Get list of all servers with SSH keys
SERVERS=()
for keyfile in "$KEYS_DIR"/*.pub; do
    if [ -f "$keyfile" ]; then
        server=$(basename "$keyfile" .pub)
        # Skip special servers
        if [[ "$server" != "README" ]] && [[ "$server" != "edge" ]]; then
            SERVERS+=("$server")
        fi
    fi
done

echo -e "${BLUE}Found ${#SERVERS[@]} servers:${NC}"
printf '%s\n' "${SERVERS[@]}" | sort
echo ""

# Counters
SUCCESS_COUNT=0
FAILED_COUNT=0
SKIPPED_COUNT=0
declare -a SUCCESS_SERVERS
declare -a FAILED_SERVERS
declare -a SKIPPED_SERVERS

echo -e "${BLUE}============================================================${NC}"
echo -e "${BLUE}Starting maintenance run...${NC}"
echo -e "${BLUE}============================================================${NC}"
echo ""

# Run playbook for each server
for server in "${SERVERS[@]}"; do
    echo -e "${YELLOW}-----------------------------------------------------------${NC}"
    echo -e "${YELLOW}Processing: $server${NC}"
    echo -e "${YELLOW}-----------------------------------------------------------${NC}"

    SSH_KEY="$KEYS_DIR/$server"

    if [ ! -f "$SSH_KEY" ]; then
        echo -e "${RED}✗ SSH key not found: $SSH_KEY${NC}"
        SKIPPED_COUNT=$((SKIPPED_COUNT + 1))
        SKIPPED_SERVERS+=("$server")
        echo ""
        continue
    fi

    # Run the playbook (with SSH options to prevent agent key issues)
    if env HCLOUD_TOKEN="$HCLOUD_TOKEN" \
        ANSIBLE_SSH_ARGS="-o IdentitiesOnly=yes" \
        ~/.local/bin/ansible-playbook \
        -i hcloud.yml \
        "$PLAYBOOK" \
        --limit "$server" \
        --private-key "$SSH_KEY" 2>&1; then

        echo -e "${GREEN}✓ Success: $server${NC}"
        SUCCESS_COUNT=$((SUCCESS_COUNT + 1))
        SUCCESS_SERVERS+=("$server")
    else
        echo -e "${RED}✗ Failed: $server${NC}"
        FAILED_COUNT=$((FAILED_COUNT + 1))
        FAILED_SERVERS+=("$server")
    fi

    echo ""
done

# Summary
echo -e "${BLUE}============================================================${NC}"
echo -e "${BLUE}MAINTENANCE RUN SUMMARY${NC}"
echo -e "${BLUE}============================================================${NC}"
echo ""
echo "Total servers: ${#SERVERS[@]}"
echo -e "${GREEN}Successful: $SUCCESS_COUNT${NC}"
echo -e "${RED}Failed: $FAILED_COUNT${NC}"
echo -e "${YELLOW}Skipped: $SKIPPED_COUNT${NC}"
echo ""

if [ $SUCCESS_COUNT -gt 0 ]; then
    echo -e "${GREEN}Successful servers:${NC}"
    printf '  %s\n' "${SUCCESS_SERVERS[@]}"
    echo ""
fi

if [ $FAILED_COUNT -gt 0 ]; then
    echo -e "${RED}Failed servers:${NC}"
    printf '  %s\n' "${FAILED_SERVERS[@]}"
    echo ""
fi

if [ $SKIPPED_COUNT -gt 0 ]; then
    echo -e "${YELLOW}Skipped servers:${NC}"
    printf '  %s\n' "${SKIPPED_SERVERS[@]}"
    echo ""
fi

echo -e "${BLUE}============================================================${NC}"

# Exit with error if any failures
if [ $FAILED_COUNT -gt 0 ]; then
    exit 1
fi

exit 0
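A typical invocation, matching the header comment (token elided). The single-server variant below is a sketch: it reproduces by hand what the script does inside its loop, using the same inventory and key layout:

  cd infrastructure/
  HCLOUD_TOKEN="..." ./scripts/run-maintenance-all-servers.sh

  # One server only, bypassing the loop
  cd infrastructure/ansible/
  HCLOUD_TOKEN="..." ANSIBLE_SSH_ARGS="-o IdentitiesOnly=yes" \
      ansible-playbook -i hcloud.yml playbooks/260124-nextcloud-maintenance.yml \
      --limit ree --private-key ../keys/ssh/ree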
@ -1,29 +0,0 @@
#cloud-config
package_update: true
package_upgrade: true
packages:
  - curl
  - wget
  - git
  - python3
  - python3-pip
runcmd:
  - hostnamectl set-hostname ${hostname}
  - |
    # Configure default route for private-only server
    # Hetzner network route forwards traffic to edge gateway (10.0.0.2)
    # Enable DHCP to get IP from Hetzner Cloud private network
    cat > /etc/netplan/60-private-network.yaml <<'NETPLAN'
    network:
      version: 2
      ethernets:
        enp7s0:
          dhcp4: true
          dhcp4-overrides:
            use-routes: false
          routes:
            - to: default
              via: 10.0.0.1
    NETPLAN
    chmod 600 /etc/netplan/60-private-network.yaml
    netplan apply
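On a server booted from this now-removed cloud-config, all traffic left through the private gateway rather than a public interface. A quick check that the netplan override took effect (a sketch; run on the server itself):

  # Expect: default via 10.0.0.1 dev enp7s0
  ip route show default
  # The private interface should hold a DHCP lease but no DHCP-supplied routes
  ip addr show enp7s0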