From f79b93ecc9cea0c037b1d49923c397f09946ddd5 Mon Sep 17 00:00:00 2001 From: Micha Date: Thu, 19 Mar 2026 20:20:58 +0100 Subject: [PATCH] initial homelab structure from NAS --- 00_setup-networks.sh | 38 ++ 01_paperless.sql | 37 ++ 01_stack-backend.yml | 89 +++++ 02_stack-dns.yml | 88 +++++ 03_stack-frontend.yml | 512 ++++++++++++++++++++++++++ 04_stack-traefik.yml | 93 +++++ MIGRATION.md | 295 +++++++++++++++ infra/ddns-updater/docker-compose.yml | 30 ++ infra/traefik/docker-compose.yml | 62 ++++ 9 files changed, 1244 insertions(+) create mode 100755 00_setup-networks.sh create mode 100755 01_paperless.sql create mode 100755 01_stack-backend.yml create mode 100755 02_stack-dns.yml create mode 100644 03_stack-frontend.yml create mode 100755 04_stack-traefik.yml create mode 100755 MIGRATION.md create mode 100644 infra/ddns-updater/docker-compose.yml create mode 100644 infra/traefik/docker-compose.yml diff --git a/00_setup-networks.sh b/00_setup-networks.sh new file mode 100755 index 0000000..c565c52 --- /dev/null +++ b/00_setup-networks.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# ============================================================================= +# 00_setup-networks.sh – Phase 1 +# Erstellt alle gemeinsamen Homelab-Netzwerke. +# Sicher jederzeit ausführbar — stört keine laufenden Container. +# ============================================================================= + +set -e + +echo ">>> Erstelle gemeinsame Homelab-Netzwerke..." 
+ +docker network create \ + --driver bridge \ + --subnet 172.30.0.0/24 \ + --gateway 172.30.0.1 \ + --label net.homelab.role=frontend \ + frontend_net && echo " ✓ frontend_net (172.30.0.0/24)" + +docker network create \ + --driver bridge \ + --internal \ + --subnet 172.21.0.0/24 \ + --gateway 172.21.0.1 \ + --label net.homelab.role=backend \ + backend_net && echo " ✓ backend_net (172.21.0.0/24, internal)" + +docker network create \ + --driver bridge \ + --subnet 172.23.0.0/24 \ + --gateway 172.23.0.1 \ + --label net.homelab.role=dns \ + dns_net && echo " ✓ dns_net (172.23.0.0/24)" + +echo "" +echo ">>> Ergebnis:" +docker network ls | grep -E "NAME|frontend_net|backend_net|dns_net" +echo "" +echo ">>> Nächster Schritt laut MIGRATION.md: Phase 2 (DNS)" diff --git a/01_paperless.sql b/01_paperless.sql new file mode 100755 index 0000000..1145ea3 --- /dev/null +++ b/01_paperless.sql @@ -0,0 +1,37 @@ +-- ============================================================================= +-- 01_paperless.sql +-- Legt paperless-User und -Datenbank in postgresql17 an. +-- +-- NICHT als Volume eingebunden — manuell per stdin einspielen: +-- docker exec -i postgresql17 psql -U postgres < 01_paperless.sql +-- +-- Nur ausführen wenn User/DB noch NICHT existieren (Phase 3a prüfen!). +-- Passwort hier muss mit PAPERLESS_DB_PASSWORD in .env übereinstimmen. 
+-- ============================================================================= + +-- User anlegen (idempotent) +DO $$ +BEGIN + IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'paperless') THEN + CREATE ROLE paperless WITH LOGIN PASSWORD 'HIER_PAPERLESS_DB_PASSWORD_EINTRAGEN'; + RAISE NOTICE 'User paperless angelegt.'; + ELSE + ALTER ROLE paperless WITH PASSWORD 'HIER_PAPERLESS_DB_PASSWORD_EINTRAGEN'; + RAISE NOTICE 'User paperless existiert bereits — Passwort aktualisiert.'; + END IF; +END +$$; + +-- Datenbank anlegen (idempotent) +SELECT 'CREATE DATABASE paperless OWNER paperless' +WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'paperless')\gexec + +-- Berechtigungen +GRANT ALL PRIVILEGES ON DATABASE paperless TO paperless; + +-- Prüfausgabe +\echo '' +\echo '--- Datenbanken ---' +\l paperless +\echo '--- Rollen ---' +\du paperless diff --git a/01_stack-backend.yml b/01_stack-backend.yml new file mode 100755 index 0000000..35a60e2 --- /dev/null +++ b/01_stack-backend.yml @@ -0,0 +1,89 @@ +# ============================================================================= +# 01_stack-backend.yml – Phase 3 +# Datenbank-Stack: postgresql17 + Redis +# ============================================================================= +# +# FIXES in dieser Version: +# - SQL-Init-Script NICHT mehr als Volume gemountet (war Phantom-Pfad) +# → Init läuft manuell per stdin, siehe MIGRATION.md Phase 3a +# - Redis auf gepinnter Version (nicht mehr :latest) +# - Kein ports:-Block → 5432 und 6379 nicht mehr vom Host erreichbar +# +# VORAUSSETZUNG vor dem Start: +# 1. secrets/postgres_password.txt anlegen (sonst startet Stack nicht!) +# 2. Phase 3a in MIGRATION.md ausführen (DB/User prüfen oder anlegen) +# 3. 
Backend-Health prüfen bevor Frontend gestartet wird +# +# ============================================================================= + +networks: + backend_net: + external: true + +services: + + # --------------------------------------------------------------------------- + # POSTGRESQL 17 + # Kein ports:-Block → Port 5432 nicht mehr vom LAN erreichbar. + # Kein SQL-Mount → Init läuft manuell per stdin (siehe MIGRATION.md). + # DNS-Name "postgresql17" ist nur aus backend_net auflösbar. + # --------------------------------------------------------------------------- + postgresql17: + image: postgres:17 # gepinnt auf Major-Version + container_name: postgresql17 + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/postgresql17:/var/lib/postgresql/data + networks: + backend_net: + ipv4_address: 172.21.0.10 + environment: + POSTGRES_PASSWORD_FILE: /run/secrets/postgres_password + POSTGRES_USER: postgres + secrets: + - postgres_password + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 30s + timeout: 10s + retries: 5 + start_period: 20s + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://github.com/juusujanar/unraid-templates/raw/master/img/PostgreSQL-logo.png" + + # --------------------------------------------------------------------------- + # REDIS 7 + # Kein ports:-Block → Port 6379 nicht mehr vom LAN erreichbar. + # Version gepinnt auf 7-alpine (war :latest). + # Passwort aus .env — dieselbe Variable wie in paperless-ngx! 
+ # --------------------------------------------------------------------------- + Redis: + image: redis:7-alpine # gepinnt — nicht mehr :latest + container_name: Redis + restart: unless-stopped + security_opt: + - no-new-privileges:true + command: > + redis-server + --requirepass "${REDIS_PASSWORD}" + --maxmemory 256mb + --maxmemory-policy allkeys-lru + --save "" + networks: + backend_net: + ipv4_address: 172.21.0.11 + healthcheck: + test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "${REDIS_PASSWORD}", "ping"] + interval: 30s + timeout: 5s + retries: 3 + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/juusujanar/unraid-templates/master/img/Redis-logo.png" + +secrets: + postgres_password: + file: ./secrets/postgres_password.txt diff --git a/02_stack-dns.yml b/02_stack-dns.yml new file mode 100755 index 0000000..e0f686f --- /dev/null +++ b/02_stack-dns.yml @@ -0,0 +1,88 @@ +# ============================================================================= +# 02_stack-dns.yml – Phase 2 +# DNS-Stack: unbound (rekursiver Resolver) + pihole (DNS-Filter) +# ============================================================================= +# +# Starten in dieser Reihenfolge (depends_on erzwingt das bereits): +# 1. unbound hoch + healthy abwarten +# 2. dann pihole hoch +# +# WICHTIG vor dem Start: +# - Wenn pihole dein einziger DNS-Server ist: +# Router-DNS kurz auf 1.1.1.1 setzen, migrieren, zurücksetzen +# - pihole macht bei dir kein DHCP → bridge (dns_net) ist korrekt +# - Port 53 muss öffentlich bleiben (DNS-Dienst für LAN) +# +# ============================================================================= + +networks: + dns_net: + external: true + +services: + + # --------------------------------------------------------------------------- + # UNBOUND – Rekursiver DNS-Resolver + # Braucht Outbound ins Internet (rekursive Auflösung zu Root-Servern). + # dns_net ist daher nicht internal:true. 
+ # Kein eingehender Host-Port — pihole erreicht unbound per DNS-Name. + # --------------------------------------------------------------------------- + unbound: + image: kutzilla/unbound:latest + container_name: unbound + restart: unless-stopped + security_opt: + - no-new-privileges:true + networks: + dns_net: + ipv4_address: 172.23.0.10 + healthcheck: + test: ["CMD-SHELL", "drill @127.0.0.1 cloudflare.com || exit 1"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/kutzilla/unraid-templates/master/images/unbound.png" + + # --------------------------------------------------------------------------- + # PIHOLE – DNS-Filter + # Upstream: "unbound#53" → Docker-DNS löst "unbound" → 172.23.0.10 + # Port 53 bleibt öffentlich (DNS-Dienst für LAN ist zwingend nötig). + # Admin-UI auf Port 8155 bleibt direkt erreichbar (DNS muss auch ohne + # Proxy funktionieren — Henne-Ei-Problem mit Traefik vermeiden). 
+ # --------------------------------------------------------------------------- + binhex-official-pihole: + image: pihole/pihole:2026.02.0 # gepinnt auf bekannte stabile Version + container_name: binhex-official-pihole + restart: unless-stopped + depends_on: + unbound: + condition: service_healthy + networks: + dns_net: + ipv4_address: 172.23.0.20 + ports: + - "53:53/tcp" + - "53:53/udp" + - "8155:80/tcp" + volumes: + - /mnt/user/appdata/official-pihole/pihole:/etc/pihole + - /mnt/user/appdata/official-pihole/pihole/dnsmasq:/etc/dnsmasq.d + environment: + TZ: "Europe/Berlin" + WEBPASSWORD: "${PIHOLE_WEBPASSWORD}" + PIHOLE_DNS_: "unbound#53" + DNSSEC: "true" + DNSMASQ_LISTENING: "all" + healthcheck: + test: ["CMD", "curl", "-sf", "http://localhost/admin/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 20s + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/binhex/docker-templates/master/binhex/images/official-pihole-icon.png" + net.unraid.docker.webui: "http://[IP]:[PORT:8155]/admin" diff --git a/03_stack-frontend.yml b/03_stack-frontend.yml new file mode 100644 index 0000000..143f822 --- /dev/null +++ b/03_stack-frontend.yml @@ -0,0 +1,512 @@ +# ============================================================================= +# 03_stack-frontend.yml – Phase 4 +# Web-App-Stack: alle Container mit Web-UI +# ============================================================================= +# +# FIXES in dieser Version: +# - Portainer: Port-Mapping korrigiert (9000:9000 + 9443:9443) +# - paperless-ngx: Redis-URL mit Auth (redis://:PASS@Redis:6379) +# - paperless-ngx: dual-homed frontend_net + backend_net +# - Vaultwarden: TLS-Situation dokumentiert und geklärt (siehe unten) +# - Kritische Images gepinnt (redis, portainer, pihole, paperless) +# +# TRAEFIK: noch nicht vorhanden +# → Host-Ports bleiben als bewusste Übergangslösung offen +# → traefik.enable=false überall — Labels sind vorbereitet, aber 
inaktiv +# → Aktivierung: siehe 04_stack-traefik.yml und MIGRATION.md Phase 5 +# +# STARTREIHENFOLGE (depends_on greift nicht über Stack-Grenzen!): +# 1. 01_stack-backend.yml hochfahren und Health abwarten +# 2. Erst dann paperless-ngx starten +# 3. Alle anderen können parallel starten +# Nicht blind: docker compose -f 03_stack-frontend.yml up -d +# Besser: Wellen laut MIGRATION.md Phase 4 +# +# ============================================================================= + +networks: + frontend_net: + external: true + backend_net: + external: true + +services: + + # --------------------------------------------------------------------------- + # PORTAINER CE + # FIX: Port-Mapping war falsch verdreht. + # Korrekt: 9000=HTTP-UI, 9443=HTTPS-UI + # --------------------------------------------------------------------------- + PortainerCE: + image: portainer/portainer-ce:2.21.5 # gepinnt + container_name: PortainerCE + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/portainer:/data + - /var/run/docker.sock:/var/run/docker.sock:rw + networks: + frontend_net: + ipv4_address: 172.30.0.10 + ports: + - "9000:9000" # HTTP-UI + - "9443:9443" # HTTPS-UI + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/EMP83/unraid-templates/main/PortainerCE/Portainer.png" + net.unraid.docker.webui: "http://[IP]:[PORT:9000]/" + traefik.enable: "false" + traefik.http.routers.portainer.rule: "Host(`portainer.yourdomain.tld`)" + traefik.http.routers.portainer.entrypoints: "websecure" + traefik.http.routers.portainer.tls.certresolver: "letsencrypt" + traefik.http.services.portainer.loadbalancer.server.port: "9000" + traefik.docker.network: "frontend_net" + + # --------------------------------------------------------------------------- + # PAPERLESS-NGX ← DUAL-HOMED + # FIX 1: Redis-URL mit Passwort — ohne das crasht Paperless beim Start + # FIX 2: in frontend_net UND backend_net 
gleichzeitig + # + # ACHTUNG: Nicht starten bevor 01_stack-backend.yml healthy ist! + # postgresql17 und Redis müssen laufen, sonst schlägt der Start fehl. + # restart:unless-stopped fängt das zwar ab, aber bewusst warten ist besser. + # --------------------------------------------------------------------------- + paperless-ngx: + image: ghcr.io/paperless-ngx/paperless-ngx:2.20.10 # gepinnt + container_name: paperless-ngx + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/documents/paperless:/usr/src/paperless/media + - /mnt/user/documents/scans_inbox:/usr/src/paperless/consume + - /mnt/user/appdata/paperless-ngx/data:/usr/src/paperless/data + - /mnt/user/documents/paperless/export:/usr/src/paperless/export + networks: + frontend_net: + ipv4_address: 172.30.0.17 # Traefik / Browser → paperless hier + backend_net: + ipv4_address: 172.21.0.20 # paperless → postgresql17 + Redis hier + ports: + - "8000:8000" # Übergang bis Traefik aktiv + environment: + PAPERLESS_DBENGINE: "postgresql" + PAPERLESS_DBHOST: "postgresql17" + PAPERLESS_DBPORT: "5432" + PAPERLESS_DBNAME: "paperless" + PAPERLESS_DBUSER: "paperless" + PAPERLESS_DBPASS: "${PAPERLESS_DB_PASSWORD}" + # FIX: Redis mit Auth — REDIS_PASSWORD kommt aus derselben .env wie Redis selbst + PAPERLESS_REDIS: "redis://:${REDIS_PASSWORD}@Redis:6379" + PAPERLESS_TIME_ZONE: "Europe/Berlin" + PAPERLESS_OCR_LANGUAGE: "deu+eng" + PAPERLESS_TIKA_ENABLED: "0" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/selfhosters/unRAID-CA-templates/master/templates/img/paperless.png" + net.unraid.docker.webui: "http://[IP]:[PORT:8000]" + traefik.enable: "false" + traefik.http.routers.paperless.rule: "Host(`paperless.yourdomain.tld`)" + traefik.http.routers.paperless.entrypoints: "websecure" + traefik.http.routers.paperless.tls.certresolver: "letsencrypt" + traefik.http.services.paperless.loadbalancer.server.port: "8000" + 
traefik.docker.network: "frontend_net" + + # --------------------------------------------------------------------------- + # VAULTWARDEN + # + # TLS-SITUATION (bitte einmal bewusst prüfen): + # Aktuell: Port 4743 → HTTP (Port 80 intern) + # Die gemounteten Zertifikate (cert.pem / key.pem) werden von Vaultwarden + # nur genutzt, wenn ROCKET_TLS in den Env-Vars gesetzt ist. + # Ohne diese Variable spricht Vaultwarden intern HTTP — die Zertifikate + # haben dann keinen Effekt. + # + # Wahrscheinlichstes Szenario: Tailscale terminiert TLS davor → intern + # läuft es HTTP → das ist bewusst so und vollkommen okay. + # + # Falls du HTTPS direkt in Vaultwarden willst, ergänze: + # environment: + # ROCKET_TLS: '{certs="/ssl/cert.pem",key="/ssl/key.pem"}' + # + # Für Phase 1: so lassen, Tailscale-Zugang funktioniert weiterhin. + # --------------------------------------------------------------------------- + vaultwarden: + image: vaultwarden/server:1.35.4 # gepinnt + container_name: vaultwarden + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/vaultwarden:/data + - /mnt/user/appdata/vaultwarden/kallilabcore.taild9fcf2.ts.net.crt:/ssl/cert.pem:ro + - /mnt/user/appdata/vaultwarden/kallilabcore.taild9fcf2.ts.net.key:/ssl/key.pem + networks: + frontend_net: + ipv4_address: 172.30.0.16 + ports: + - "4743:80" # Tailscale terminiert TLS davor — bewusst so + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/selfhosters/unRAID-CA-templates/master/templates/img/vaultwarden.png" + net.unraid.docker.webui: "https://kallilabcore.taild9fcf2.ts.net:4743/admin" + traefik.enable: "false" + traefik.http.routers.vault.rule: "Host(`vault.yourdomain.tld`)" + traefik.http.routers.vault.entrypoints: "websecure" + traefik.http.services.vault.loadbalancer.server.port: "80" + traefik.docker.network: "frontend_net" + + # 
--------------------------------------------------------------------------- + # PAPERLESS-AI + # --------------------------------------------------------------------------- + Paperless-AI: + image: clusterzx/paperless-ai:latest + container_name: Paperless-AI + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/paperless-ai:/app/data + networks: + frontend_net: + ipv4_address: 172.30.0.18 + ports: + - "3236:3000" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/nwithan8/unraid_templates/master/images/paperless-ai-icon.png" + net.unraid.docker.webui: "http://[IP]:[PORT:3000]/" + traefik.enable: "false" + traefik.http.routers.paperless-ai.rule: "Host(`paperless-ai.yourdomain.tld`)" + traefik.http.routers.paperless-ai.entrypoints: "websecure" + traefik.http.services.paperless-ai.loadbalancer.server.port: "3000" + traefik.docker.network: "frontend_net" + + # --------------------------------------------------------------------------- + # MAIL-ARCHIVER + # Phase 1: konservativ — externe DB bleibt auf 192.168.178.58 + # Separate Migration in Phase 3c (MIGRATION.md) + # --------------------------------------------------------------------------- + mail-archiver: + image: s1t5/mailarchiver + container_name: mail-archiver + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/mailarchiver/data-protection-keys:/app/DataProtection-Keys:rw + networks: + frontend_net: + ipv4_address: 172.30.0.19 + ports: + - "5000:5000" + environment: + TZ: "Europe/Berlin" + TimeZone__DisplayTimeZoneId: "Europe/Berlin" + ConnectionStrings__DefaultConnection: "Host=192.168.178.58;Port=5432;Database=mailarchiver;Username=mailarchiver;Password=${MAILARCHIVER_DB_PASSWORD}" + Authentication__Username: "${MAILARCHIVER_USERNAME}" + Authentication__Password: "${MAILARCHIVER_PASSWORD}" + Authentication__CookieName: "MailArchiverAuth" + 
Authentication__SessionTimeoutMinutes: "60" + OAuth__Enabled: "false" + MailSync__IntervalMinutes: "15" + MailSync__TimeoutMinutes: "60" + MailSync__IgnoreSelfSignedCert: "false" + MailSync__ConnectionTimeoutSeconds: "180" + MailSync__CommandTimeoutSeconds: "300" + BatchOperation__BatchSize: "50" + BatchOperation__PauseBetweenEmailsMs: "50" + BatchOperation__PauseBetweenBatchesMs: "250" + BatchRestore__MaxAsyncEmails: "50000" + BatchRestore__MaxSyncEmails: "150" + BatchRestore__AsyncThreshold: "50" + BatchRestore__DefaultBatchSize: "50" + BatchRestore__SessionTimeoutMinutes: "30" + Upload__MaxFileSizeGB: "10" + Upload__RequestHeadersTimeoutHours: "2" + Upload__KeepAliveTimeoutHours: "4" + Selection__MaxSelectableEmails: "250" + Npgsql__CommandTimeout: "900" + HOST_OS: "Unraid" + HOST_HOSTNAME: "Kallilabcore" + HOST_CONTAINERNAME: "mail-archiver" + ASPNETCORE_ENVIRONMENT: "Production" + ASPNETCORE_URLS: "http://+:5000" + logging: + driver: json-file + options: + max-size: "50m" + max-file: "1" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/rschuiling/unraid-templates/refs/heads/main/icons/envelope-check.png" + net.unraid.docker.webui: "http://[IP]:[PORT:5000]" + traefik.enable: "false" + traefik.http.routers.mail.rule: "Host(`mail.yourdomain.tld`)" + traefik.http.routers.mail.entrypoints: "websecure" + traefik.http.services.mail.loadbalancer.server.port: "5000" + traefik.docker.network: "frontend_net" + + # =========================================================================== + # WELLE A – unkritisch, jederzeit migrierbar + # =========================================================================== + + homarr: + image: ghcr.io/homarr-labs/homarr:latest + container_name: homarr + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - /mnt/user/appdata/homarr/appdata:/appdata + networks: + frontend_net: + ipv4_address: 
172.30.0.11 + ports: + - "10004:7575" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/manuel-rw/unraid-templates/master/templates/homarr/icon.png" + net.unraid.docker.webui: "http://[IP]:[PORT:7575]/" + traefik.enable: "false" + traefik.http.routers.homarr.rule: "Host(`homarr.yourdomain.tld`)" + traefik.http.routers.homarr.entrypoints: "websecure" + traefik.http.services.homarr.loadbalancer.server.port: "7575" + traefik.docker.network: "frontend_net" + + homepage: + image: ghcr.io/gethomepage/homepage:latest + container_name: homepage + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/homepage:/app/config + - /mnt/user/appdata/homepage/config/public/images:/app/public/images + - /var/run/docker.sock:/var/run/docker.sock:ro + networks: + frontend_net: + ipv4_address: 172.30.0.12 + ports: + - "3000:3000" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/gethomepage/homepage/main/public/android-chrome-512x512.png" + net.unraid.docker.webui: "http://[IP]:[PORT:3000]" + traefik.enable: "false" + traefik.http.routers.homepage.rule: "Host(`home.yourdomain.tld`)" + traefik.http.routers.homepage.entrypoints: "websecure" + traefik.http.services.homepage.loadbalancer.server.port: "3000" + traefik.docker.network: "frontend_net" + + Dozzle: + image: amir20/dozzle:v10.1.1 # gepinnt + container_name: Dozzle + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/dozzle:/data + - /var/run/docker.sock:/var/run/docker.sock:ro + networks: + frontend_net: + ipv4_address: 172.30.0.13 + ports: + - "9888:8080" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/selfhosters/unRAID-CA-templates/master/templates/img/dozzle.png" + net.unraid.docker.webui: "http://[IP]:[PORT:8080]/" + traefik.enable: "false" + 
traefik.http.routers.dozzle.rule: "Host(`dozzle.yourdomain.tld`)" + traefik.http.routers.dozzle.entrypoints: "websecure" + traefik.http.services.dozzle.loadbalancer.server.port: "8080" + traefik.docker.network: "frontend_net" + + dashdot: + image: mauricenino/dashdot:latest + container_name: dashdot + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /etc/os-release:/etc/os-release:ro + - /mnt:/mnt/host:ro + networks: + frontend_net: + ipv4_address: 172.30.0.22 + ports: + - "3002:3001" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/manuel-rw/unraid-templates/master/templates/dashdot/dashdot.png" + net.unraid.docker.webui: "http://[IP]:[PORT:3001]/" + traefik.enable: "false" + traefik.http.routers.dashdot.rule: "Host(`dash.yourdomain.tld`)" + traefik.http.routers.dashdot.entrypoints: "websecure" + traefik.http.services.dashdot.loadbalancer.server.port: "3001" + traefik.docker.network: "frontend_net" + + theme-park: + image: ghcr.io/themepark-dev/theme.park:1.22.0 # gepinnt + container_name: theme-park + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/theme-park:/config + networks: + frontend_net: + ipv4_address: 172.30.0.24 + ports: + - "8009:80" + - "32770:443" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/selfhosters/unRAID-CA-templates/master/templates/img/themepark.png" + net.unraid.docker.webui: "http://[IP]:[PORT:80]" + traefik.enable: "false" + traefik.http.routers.themepark.rule: "Host(`theme.yourdomain.tld`)" + traefik.http.routers.themepark.entrypoints: "websecure" + traefik.http.services.themepark.loadbalancer.server.port: "80" + traefik.docker.network: "frontend_net" + + # =========================================================================== + # WELLE B – mittelkritisch + # 
=========================================================================== + + UptimeKuma: + image: louislam/uptime-kuma:1 + container_name: UptimeKuma + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/uptimekuma:/app/data + - /var/run/docker.sock:/var/run/docker.sock:ro + networks: + frontend_net: + ipv4_address: 172.30.0.14 + ports: + - "3001:3001" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/CorneliousJD/Docker-Templates/master/icons/uptimekuma.png" + net.unraid.docker.webui: "http://[IP]:[PORT:3001]" + traefik.enable: "false" + traefik.http.routers.uptime.rule: "Host(`uptime.yourdomain.tld`)" + traefik.http.routers.uptime.entrypoints: "websecure" + traefik.http.services.uptime.loadbalancer.server.port: "3001" + traefik.docker.network: "frontend_net" + + scrutiny: + image: ghcr.io/starosdev/scrutiny:latest-omnibus + container_name: scrutiny + restart: unless-stopped + cap_add: + - SYS_RAWIO + devices: + - /dev/sdb:/dev/sdb + - /dev/sdc:/dev/sdc + - /dev/nvme0n1:/dev/nvme0n1 + volumes: + - /mnt/user/appdata/scrutiny/config:/opt/scrutiny/config + - /mnt/user/appdata/scrutiny/influxdb:/opt/scrutiny/influxdb + - /run/udev:/run/udev:ro + networks: + frontend_net: + ipv4_address: 172.30.0.15 + ports: + - "8080:8080" + - "8086:8086" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/Starosdev/scrutiny/master/webapp/frontend/src/assets/images/logo/scrutiny-logo-dark.png" + net.unraid.docker.webui: "http://[IP]:[PORT:8080]/web/dashboard" + traefik.enable: "false" + traefik.http.routers.scrutiny.rule: "Host(`scrutiny.yourdomain.tld`)" + traefik.http.routers.scrutiny.entrypoints: "websecure" + traefik.http.services.scrutiny.loadbalancer.server.port: "8080" + traefik.docker.network: "frontend_net" + + code-server: + image: lscr.io/linuxserver/code-server:latest + container_name: code-server + 
restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/code-server:/config + - /mnt/user:/mnt/user + networks: + frontend_net: + ipv4_address: 172.30.0.20 + ports: + - "7258:8443" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/code-server-logo.png" + net.unraid.docker.webui: "http://[IP]:[PORT:8443]" + traefik.enable: "false" + traefik.http.routers.code.rule: "Host(`code.yourdomain.tld`)" + traefik.http.routers.code.entrypoints: "websecure" + traefik.http.services.code.loadbalancer.server.port: "8443" + traefik.docker.network: "frontend_net" + + # =========================================================================== + # WELLE C – sensibel, einzeln migrieren + # =========================================================================== + + luckyBackup: + image: ghcr.io/ich777/luckybackup + container_name: luckyBackup + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user:/mnt/user + - /mnt/cache/appdata/luckybackup:/luckybackup + networks: + frontend_net: + ipv4_address: 172.30.0.23 + ports: + - "7675:8080" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/ich777/docker-templates/master/ich777/images/luckybackup.png" + net.unraid.docker.webui: "http://[IP]:[PORT:8080]/vnc.html?autoconnect=true" + traefik.enable: "false" + traefik.http.routers.backup.rule: "Host(`backup.yourdomain.tld`)" + traefik.http.routers.backup.entrypoints: "websecure" + traefik.http.services.backup.loadbalancer.server.port: "8080" + traefik.docker.network: "frontend_net" + + Stash: + image: stashapp/stash + container_name: Stash + restart: unless-stopped + security_opt: + - no-new-privileges:true + volumes: + - /mnt/user/appdata/stash/blobs:/blobs + - /mnt/user/appdata/stash/cache:/cache + - /mnt/user/media/Heimatfilme:/data 
+ - /mnt/user/appdata/stash/generated:/generated + - /mnt/user/appdata/stash/metadata:/metadata + - /mnt/user/appdata/stash/config:/root/.stash + networks: + frontend_net: + ipv4_address: 172.30.0.21 + ports: + - "6969:9999" + labels: + net.unraid.docker.managed: "dockerman" + net.unraid.docker.icon: "https://raw.githubusercontent.com/CorneliousJD/Docker-Templates/master/icons/stash.png" + net.unraid.docker.webui: "http://[IP]:[PORT:9999]" + traefik.enable: "false" + traefik.http.routers.stash.rule: "Host(`stash.yourdomain.tld`)" + traefik.http.routers.stash.entrypoints: "websecure" + traefik.http.services.stash.loadbalancer.server.port: "9999" + traefik.docker.network: "frontend_net" diff --git a/04_stack-traefik.yml b/04_stack-traefik.yml new file mode 100755 index 0000000..218a6f2 --- /dev/null +++ b/04_stack-traefik.yml @@ -0,0 +1,93 @@ +# ============================================================================= +# 04_stack-traefik.yml – Phase 5 (separat, wenn bereit) +# ============================================================================= +# WANN: Erst wenn Phase 1–4 stabil laufen. +# NICHT gleichzeitig mit der Netz-Migration starten. 
+# +# Voraussetzungen: +# - Domain + DNS-Eintrag vorhanden +# - Port 80 + 443 auf Router weitergeleitet +# - ./traefik/traefik.yml angelegt (Vorlage unten) +# - ./traefik/dynamic/middlewares.yml angelegt (Vorlage unten) +# +# Nach dem Start pro Container in 03_stack-frontend.yml: +# - traefik.enable: "false" → "true" +# - yourdomain.tld anpassen +# - ports:-Block auskommentieren +# - docker compose -f 03_stack-frontend.yml up -d --force-recreate +# ============================================================================= + +networks: + frontend_net: + external: true + +services: + traefik: + image: traefik:v3.3 + container_name: traefik + restart: unless-stopped + security_opt: + - no-new-privileges:true + ports: + - "80:80" + - "443:443" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - traefik_certs:/certs + - ./traefik/traefik.yml:/etc/traefik/traefik.yml:ro + - ./traefik/dynamic:/etc/traefik/dynamic:ro + networks: + frontend_net: + ipv4_address: 172.30.0.2 # statische IP in frontend_net (172.30.0.0/24) — Infrastrukturanker + labels: + traefik.enable: "true" + traefik.http.routers.traefik-dash.rule: "Host(`traefik.yourdomain.tld`)" + traefik.http.routers.traefik-dash.entrypoints: "websecure" + traefik.http.routers.traefik-dash.tls.certresolver: "letsencrypt" + traefik.http.routers.traefik-dash.service: "api@internal" + traefik.http.routers.traefik-dash.middlewares: "auth@file" + traefik.docker.network: "frontend_net" + +volumes: + traefik_certs: + name: traefik_certs + +# ============================================================================= +# traefik/traefik.yml (Vorlage): +# ============================================================================= +# api: +# dashboard: true +# entryPoints: +# web: +# address: ":80" +# http: +# redirections: +# entryPoint: +# to: websecure +# scheme: https +# websecure: +# address: ":443" +# certificatesResolvers: +# letsencrypt: +# acme: +# email: deine@email.de +# storage: /certs/acme.json +# httpChallenge: +# entryPoint: web 
+# providers: +# docker: +# exposedByDefault: false +# network: frontend_net +# file: +# directory: /etc/traefik/dynamic +# watch: true +# +# ============================================================================= +# traefik/dynamic/middlewares.yml (Vorlage): +# ============================================================================= +# http: +# middlewares: +# auth: +# basicAuth: +# users: +# - "admin:$apr1$..." # htpasswd generieren: htpasswd -nb admin passwort diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100755 index 0000000..a4218e7 --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,295 @@ +# Homelab Migration – Operativer Plan + +## Dateistruktur + +``` +homelab/ +├── 00_setup-networks.sh +├── 01_stack-backend.yml +├── 01_paperless.sql ← manuell per stdin einspielen, nicht gemountet +├── 02_stack-dns.yml +├── 03_stack-frontend.yml +├── 04_stack-traefik.yml ← Phase 5, noch nicht aktiv +├── .env ← nicht in Git! +├── .gitignore +├── secrets/ +│ └── postgres_password.txt ← nicht in Git!, muss vor Phase 3 existieren +└── traefik/ ← Phase 5 vorbereiten + ├── traefik.yml + └── dynamic/ + └── middlewares.yml +``` + +--- + +## Phase 0 – Vorbereitung (nichts migrieren, nur prüfen) + +### 0a – Zugangsdaten rotieren +Die Mail-Archiver-Credentials waren in `docker inspect` im Klartext sichtbar. +```bash +# Neue Passwörter in .env setzen: +# MAILARCHIVER_DB_PASSWORD, MAILARCHIVER_USERNAME, MAILARCHIVER_PASSWORD +``` + +### 0b – Secrets anlegen +```bash +mkdir -p secrets +echo "sicheres_postgres_passwort" > secrets/postgres_password.txt +chmod 600 secrets/postgres_password.txt +# Dieses Passwort ist das POSTGRES Superuser-Passwort, nicht das paperless-Passwort! +``` + +### 0c – .env befüllen +Alle `AENDERN_*`-Werte in `.env` durch echte Passwörter ersetzen. 
+ +**KRITISCH:** `REDIS_PASSWORD` und `PAPERLESS_DB_PASSWORD` müssen konsistent sein: +- `REDIS_PASSWORD` → steht in `.env`, wird von Redis Container UND paperless-ngx verwendet +- `PAPERLESS_DB_PASSWORD` → steht in `.env`, muss in `01_paperless.sql` gespiegelt sein + +### 0d – PostgreSQL-Zustand prüfen +```bash +# Läuft postgresql17 gerade? +docker exec -it postgresql17 psql -U postgres -c "\l" +docker exec -it postgresql17 psql -U postgres -c "\du" + +# Fragen die beantwortet sein müssen: +# 1. Existiert ein User "paperless" bereits? +# 2. Existiert eine DB "paperless" bereits? +# 3. Liegt mailarchiver-DB ebenfalls auf dieser Instanz? +# (docker exec -it postgresql17 psql -U postgres -c "\l" zeigt alle DBs) +``` + +### 0e – 01_paperless.sql vorbereiten +Passwort-Platzhalter ersetzen: +```bash +# PAPERLESS_DB_PASSWORD aus .env nehmen und hier eintragen: +sed -i 's/HIER_PAPERLESS_DB_PASSWORD_EINTRAGEN/dein_passwort/g' 01_paperless.sql +``` + +--- + +## Phase 1 – Infrastruktur-Netze +**Dauer: ~3 Min | Kein Ausfall | Sicher jederzeit** + +```bash +chmod +x 00_setup-networks.sh +./00_setup-networks.sh + +# ntopng entfernen +docker stop ntopng && docker rm ntopng + +# Prüfen +docker network ls | grep -E "frontend_net|backend_net|dns_net" +``` + +✅ Immich, scanopy, host-Netz-Container → **nicht anfassen** + +--- + +## Phase 2 – DNS +**Dauer: ~10 Min | DNS-Ausfall: ~60 Sekunden** + +### Vorbereitung +```bash +# Aktuellen Pi-hole-Stand dokumentieren (Screenshots, Blocklists etc.) +# Router-DNS auf 1.1.1.1 setzen falls pihole dein einziger DNS ist +``` + +### Migration +```bash +# 1. Alte Container stoppen +docker stop binhex-official-pihole unbound +docker rm binhex-official-pihole unbound + +# 2. Unbound zuerst starten +docker compose -f 02_stack-dns.yml up -d unbound + +# 3. Warten bis unbound healthy +watch -n3 'docker inspect --format "{{.State.Health.Status}}" unbound' +# Erwartung: healthy + +# 4. 
Erst dann pihole +docker compose -f 02_stack-dns.yml up -d binhex-official-pihole +``` + +### Verifikation +```bash +# pihole → unbound Verbindung +docker exec binhex-official-pihole nslookup google.com unbound +# Erwartung: Antwort kommt zurück + +# DNS von außen (Router-DNS zurückstellen auf deine IP) +nslookup google.com 192.168.178.58 +# Erwartung: Auflösung klappt +``` + +--- + +## Phase 3 – Backend (Postgres + Redis + Paperless) +**Dauer: ~20 Min | paperless-Ausfall: ~3 Minuten** + +⚠ Backup vorhanden ✓ — trotzdem: Phase 0d zuerst abschließen! + +### Phase 3a – DB-Zustand prüfen (nichts umbauen) +```bash +docker exec -it postgresql17 psql -U postgres -c "\l" +docker exec -it postgresql17 psql -U postgres -c "\du" +``` + +**Wenn paperless-User und -DB bereits existieren** → weiter zu Phase 3b, SQL überspringen. + +**Wenn paperless-User/DB fehlen** → erst 01_paperless.sql einspielen: +```bash +# Variante B (kein Volume-Mount nötig — stdin direkt pipen): +docker exec -i postgresql17 psql -U postgres < 01_paperless.sql + +# Prüfen +docker exec -it postgresql17 psql -U postgres -c "\l" +docker exec -it postgresql17 psql -U postgres -c "\du" +# Erwartung: paperless-DB und paperless-User sichtbar +``` + +### Phase 3b – paperless stoppen +```bash +docker stop paperless-ngx Paperless-AI +docker rm paperless-ngx Paperless-AI +``` + +### Phase 3c – Redis + Postgres in backend_net ziehen +```bash +# Alte Container stoppen +docker stop Redis postgresql17 +docker rm Redis postgresql17 + +# Backend-Stack starten +docker compose -f 01_stack-backend.yml up -d + +# Health abwarten — NICHT weitermachen bevor beide healthy sind! +watch -n3 'docker ps --filter "name=postgresql17" --filter "name=Redis" --format "table {{.Names}}\t{{.Status}}"' +# Erwartung: beide "(healthy)" +``` + +### Phase 3d – Verbindungen testen +```bash +# PostgreSQL von backend_net erreichbar? 
+docker run --rm --network backend_net postgres:17 \ + psql "host=postgresql17 user=postgres password=$(cat secrets/postgres_password.txt) dbname=postgres" \ + -c "SELECT version();" +# Erwartung: PostgreSQL-Version + +# PostgreSQL vom Host NICHT mehr erreichbar? +nc -zv 127.0.0.1 5432 +# Erwartung: Connection refused ✓ + +# Redis PONG? +docker run --rm --network backend_net redis:7-alpine \ + redis-cli -h Redis -a "${REDIS_PASSWORD}" ping +# Erwartung: PONG +``` + +### Phase 3e – Paperless starten +```bash +docker compose -f 03_stack-frontend.yml up -d paperless-ngx Paperless-AI + +# Logs die ersten 60 Sekunden beobachten +docker logs -f paperless-ngx + +# Worauf achten: +# ✓ Keine "AUTH failed"-Fehler (Redis-Passwort stimmt) +# ✓ Keine "password authentication failed for user paperless" (DB-Passwort stimmt) +# ✓ "Migrating..." läuft durch +# ✓ Webinterface auf http://HOST:8000 erreichbar +``` + +--- + +## Phase 4 – Frontend in Wellen +**Pro Welle: ~5 Min | pro Container: ~15 Sek Ausfall** + +### Welle A – unkritisch +```bash +for c in homarr homepage Dozzle dashdot theme-park; do + docker stop $c && docker rm $c && echo "✓ $c entfernt" +done +docker compose -f 03_stack-frontend.yml up -d homarr homepage Dozzle dashdot theme-park +docker ps --filter "name=homarr" --filter "name=homepage" --filter "name=Dozzle" +``` + +### Welle B – mittelkritisch +```bash +for c in UptimeKuma scrutiny code-server; do + docker stop $c && docker rm $c && echo "✓ $c entfernt" +done +docker compose -f 03_stack-frontend.yml up -d UptimeKuma scrutiny code-server +``` + +### Welle C – sensibel (einzeln) +```bash +# PortainerCE +docker stop PortainerCE && docker rm PortainerCE +docker compose -f 03_stack-frontend.yml up -d PortainerCE + +# vaultwarden — danach Tailscale-Zugang sofort testen! +docker stop vaultwarden && docker rm vaultwarden +docker compose -f 03_stack-frontend.yml up -d vaultwarden +# Test: https://kallilabcore.taild9fcf2.ts.net:4743/admin erreichbar? 
+ +# mail-archiver — danach Login und Sync prüfen +docker stop mail-archiver && docker rm mail-archiver +docker compose -f 03_stack-frontend.yml up -d mail-archiver +docker logs -f mail-archiver # auf DB-Verbindungsfehler achten + +# Stash — separat je nach Nutzung +docker stop Stash && docker rm Stash +docker compose -f 03_stack-frontend.yml up -d Stash + +# luckyBackup +docker stop luckyBackup && docker rm luckyBackup +docker compose -f 03_stack-frontend.yml up -d luckyBackup +``` + +--- + +## Abschlusskontrolle +```bash +# Alle Container laufen? +docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Networks}}" + +# Port 5432 nicht mehr öffentlich? +nc -zv 127.0.0.1 5432 # → Connection refused ✓ +nc -zv 127.0.0.1 6379 # → Connection refused ✓ (alter Redis-Port) +nc -zv 127.0.0.1 6382 # → Connection refused ✓ (alter Redis-Port) + +# Netzwerk-Isolation: Portainer kann postgresql17 NICHT sehen +docker exec PortainerCE ping postgresql17 2>&1 | head -2 +# → Name or service not known ✓ + +# paperless-ngx kann postgresql17 sehen +docker exec paperless-ngx nc -zv postgresql17 5432 2>&1 +# → Connection succeeded ✓ +``` + +--- + +## Phase 5 – Traefik (separat, wenn bereit) + +Erst wenn Phase 1–4 stabil laufen: + +1. `traefik/traefik.yml` anlegen (Vorlage in `04_stack-traefik.yml`) +2. Domain + DNS-Eintrag einrichten +3. Port 80 + 443 auf Router weiterleiten +4. `docker compose -f 04_stack-traefik.yml up -d` +5. Dashboard testen: `https://traefik.yourdomain.tld` +6. Pro Container: `traefik.enable: "false"` → `"true"`, Port auskommentieren + +--- + +## Was unberührt bleibt +| Stack | Grund | +|---|---| +| immich (immich_default) | bereits korrekt isoliert | +| scanopy (scanopy_scanopy) | bereits korrekt isoliert | +| Plex, Tailscale, ntopng* | host-Netz technisch bedingt | +| Netdata, Glances, netalertx | host-Netz technisch bedingt | + +*ntopng wird entfernt und später sauber neu aufgesetzt. 
diff --git a/infra/ddns-updater/docker-compose.yml b/infra/ddns-updater/docker-compose.yml
new file mode 100644
index 0000000..73c7184
--- /dev/null
+++ b/infra/ddns-updater/docker-compose.yml
@@ -0,0 +1,31 @@
+services:
+  ddns-updater:
+    image: ghcr.io/qdm12/ddns-updater:latest
+    container_name: ddns-updater
+    restart: unless-stopped
+
+    networks:
+      - frontend_net
+
+    environment:
+      TZ: Europe/Berlin
+      PERIOD: 5m
+      SERVER_ENABLED: "yes"
+      LISTENING_ADDRESS: ":8000"
+      LOG_LEVEL: info
+
+    volumes:
+      - /mnt/user/appdata/ddns-updater:/updater/data
+
+    labels:
+      - "traefik.enable=true"
+      - "traefik.docker.network=frontend_net"
+      - "traefik.http.routers.ddns.rule=Host(`ddns.kaleschke.info`)"
+      - "traefik.http.routers.ddns.entrypoints=websecure"
+      - "traefik.http.routers.ddns.tls=true"
+      # without a certresolver Traefik would serve its self-signed default cert for this host
+      - "traefik.http.routers.ddns.tls.certresolver=cloudflare"
+      - "traefik.http.services.ddns.loadbalancer.server.port=8000"
+
+networks:
+  frontend_net:
+    external: true
\ No newline at end of file
diff --git a/infra/traefik/docker-compose.yml b/infra/traefik/docker-compose.yml
new file mode 100644
index 0000000..ce2bbc2
--- /dev/null
+++ b/infra/traefik/docker-compose.yml
@@ -0,0 +1,63 @@
+services:
+  traefik:
+    image: traefik:v3.3
+    container_name: traefik
+    restart: unless-stopped
+
+    command:
+      - --global.checknewversion=true
+      - --global.sendanonymoususage=false
+
+      - --api.dashboard=true
+      - --api.insecure=false
+
+      - --log.level=INFO
+
+      - --providers.docker=true
+      - --providers.docker.endpoint=unix:///var/run/docker.sock
+      - --providers.docker.exposedbydefault=false
+      - --providers.docker.network=frontend_net
+
+      - --entrypoints.web.address=:80
+      - --entrypoints.websecure.address=:443
+
+      - --certificatesresolvers.cloudflare.acme.email=michideheld@gmx.de
+      - --certificatesresolvers.cloudflare.acme.storage=/letsencrypt/acme.json
+      - --certificatesresolvers.cloudflare.acme.dnschallenge=true
+      - --certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare
+
+    environment:
+      TZ: Europe/Berlin
+      CF_DNS_API_TOKEN_FILE: /run/secrets/cloudflare_dns_api_token
+
+    secrets:
+      - cloudflare_dns_api_token
+
+    ports:
+      - "80:80"
+      - "443:443"
+
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      # NOTE: create acme.json as a FILE first (touch + chmod 600); if the path
+      # does not exist, Docker bind-mounts a directory and ACME storage breaks
+      - /mnt/user/appdata/traefik/acme.json:/letsencrypt/acme.json
+
+    networks:
+      - frontend_net
+
+    labels:
+      - traefik.enable=true
+      - traefik.docker.network=frontend_net
+      - traefik.http.routers.traefik.rule=Host(`traefik.kaleschke.info`)
+      - traefik.http.routers.traefik.entrypoints=websecure
+      - traefik.http.routers.traefik.tls=true
+      - traefik.http.routers.traefik.tls.certresolver=cloudflare
+      - traefik.http.routers.traefik.service=api@internal
+
+secrets:
+  cloudflare_dns_api_token:
+    file: /mnt/user/appdata/traefik/secrets/cloudflare_dns_api_token
+
+networks:
+  frontend_net:
+    external: true
\ No newline at end of file