# Production / staging Docker Compose for Familienarchiv.
#
# This is a self-contained file (not an overlay over docker-compose.yml).
# All services for the prod stack live here. Environment isolation is
# achieved via the docker compose project name:
#
#   production: docker compose -f docker-compose.prod.yml -p archiv-production ...
#   staging:    docker compose -f docker-compose.prod.yml -p archiv-staging --profile staging ...
#
# Volumes, networks and containers are namespaced by the project name,
# so the two environments cohabit cleanly on the same host.
#
# Required env vars (provided by .env.production / .env.staging in CI):
#   TAG                 image tag (release tag or "nightly")
#   PORT_BACKEND, PORT_FRONTEND
#                       host-side ports (bound to 127.0.0.1 only)
#   APP_DOMAIN          e.g. archiv.raddatz.cloud / staging.raddatz.cloud
#   POSTGRES_PASSWORD   Postgres password
#   MINIO_PASSWORD      MinIO root password (admin operations only)
#   MINIO_APP_PASSWORD  MinIO application service-account password
#                       (least-privilege scope: archive bucket only)
#   OCR_TRAINING_TOKEN  token guarding ocr-service /train endpoint
#   APP_ADMIN_USERNAME  seeded admin email (e.g. admin@archiv.raddatz.cloud)
#   APP_ADMIN_PASSWORD  seeded admin password — CRITICAL: locked in on
#                       first deploy because UserDataInitializer only
#                       creates the account if the email does not exist
#   MAIL_HOST, MAIL_PORT,
#   MAIL_USERNAME, MAIL_PASSWORD
#                       SMTP relay (production only; staging uses mailpit)
#   APP_MAIL_FROM       sender address (e.g. noreply@raddatz.cloud)

networks:
  archive-net:
    driver: bridge

volumes:
  postgres-data:
  minio-data:
  ocr-models:
  ocr-cache:

services:
  db:
    image: postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_USER: archiv
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: archiv
    volumes:
      - postgres-data:/var/lib/postgresql/data
    networks:
      - archive-net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U archiv -d archiv"]
      interval: 10s
      timeout: 5s
      retries: 5

  minio:
    # Pinned MinIO release for reproducible deploys; Renovate keeps it current.
    image: minio/minio:RELEASE.2025-02-28T09-55-16Z
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: archiv
      MINIO_ROOT_PASSWORD: ${MINIO_PASSWORD}
    volumes:
      - minio-data:/data
    networks:
      - archive-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3

  # Idempotent bucket bootstrap + service-account creation.
  # Runs once per `docker compose up` and exits 0; `--ignore-existing` and
  # the user-add fallback are safe on re-deploy.
  create-buckets:
    # Pinned mc client release for reproducibility; Renovate keeps it current.
    image: minio/mc:RELEASE.2025-08-13T08-35-41Z
    depends_on:
      minio:
        condition: service_healthy
    networks:
      - archive-net
    environment:
      MINIO_PASSWORD: ${MINIO_PASSWORD}
      MINIO_APP_PASSWORD: ${MINIO_APP_PASSWORD}
    # $$ escapes compose interpolation so the container shell expands the vars.
    # Folded scalar (>) joins these lines with spaces into one shell command.
    entrypoint: >
      /bin/sh -c "
      set -e;
      /usr/bin/mc alias set myminio http://minio:9000 archiv $$MINIO_PASSWORD;
      /usr/bin/mc mb myminio/familienarchiv --ignore-existing;
      /usr/bin/mc anonymous set private myminio/familienarchiv;
      /usr/bin/mc admin user add myminio archiv-app $$MINIO_APP_PASSWORD ||
      /usr/bin/mc admin user enable myminio archiv-app;
      /usr/bin/mc admin policy attach myminio readwrite --user archiv-app 2>/dev/null || true;
      /usr/bin/mc admin user info myminio archiv-app | grep -q readwrite ||
      { echo 'FATAL: archiv-app is missing the readwrite policy'; exit 1; };
      "

  # Dev-only mail catcher; gated behind the staging profile so production
  # never starts it. Staging workflow runs with `--profile staging`.
  mailpit:
    # Pinned for reproducibility; Renovate bumps the tag.
    image: axllent/mailpit:v1.29.7
    restart: unless-stopped
    profiles: ["staging"]
    networks:
      - archive-net
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:8025/api/v1/info >/dev/null 2>&1 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  ocr-service:
    build:
      context: ./ocr-service
    restart: unless-stopped
    expose:
      - "8000"
    # Surya OCR loads ~5GB of transformer models at startup; first request
    # triggers a further ~1GB Kraken model download into ocr-cache.
    # CX42+ (16 GB RAM) honours the default. On a CX32 (8 GB) override with
    # OCR_MEM_LIMIT=6g (slower first-request, fits the host).
    mem_limit: ${OCR_MEM_LIMIT:-12g}
    memswap_limit: ${OCR_MEM_LIMIT:-12g}
    volumes:
      - ocr-models:/app/models
      - ocr-cache:/root/.cache
    environment:
      KRAKEN_MODEL_PATH: /app/models/german_kurrent.mlmodel
      TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
      OCR_CONFIDENCE_THRESHOLD: "0.3"
      OCR_CONFIDENCE_THRESHOLD_KURRENT: "0.5"
    networks:
      - archive-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 10s
      timeout: 5s
      retries: 12
      start_period: 120s

  backend:
    image: familienarchiv/backend:${TAG:-nightly}
    build:
      context: ./backend
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      minio:
        condition: service_healthy
      ocr-service:
        condition: service_healthy
    # Bound to localhost only — Caddy fronts external traffic.
    ports:
      - "127.0.0.1:${PORT_BACKEND}:8080"
    environment:
      SPRING_DATASOURCE_URL: jdbc:postgresql://db:5432/archiv
      SPRING_DATASOURCE_USERNAME: archiv
      SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD}
      # Application uses the bucket-scoped service account, not MinIO root.
      S3_ENDPOINT: http://minio:9000
      S3_ACCESS_KEY: archiv-app
      S3_SECRET_KEY: ${MINIO_APP_PASSWORD}
      S3_BUCKET_NAME: familienarchiv
      S3_REGION: us-east-1
      # No SPRING_PROFILES_ACTIVE — base application.yaml is production-ready
      # (Swagger disabled, show-sql off, open-in-view false).
      APP_BASE_URL: https://${APP_DOMAIN}
      APP_ADMIN_USERNAME: ${APP_ADMIN_USERNAME}
      APP_ADMIN_PASSWORD: ${APP_ADMIN_PASSWORD}
      APP_OCR_BASE_URL: http://ocr-service:8000
      APP_OCR_TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
      MAIL_HOST: ${MAIL_HOST}
      MAIL_PORT: ${MAIL_PORT:-587}
      MAIL_USERNAME: ${MAIL_USERNAME:-}
      MAIL_PASSWORD: ${MAIL_PASSWORD:-}
      APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud}
      SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true}
      SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true}
    networks:
      - archive-net
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 10
      start_period: 30s

  frontend:
    image: familienarchiv/frontend:${TAG:-nightly}
    build:
      context: ./frontend
      target: production
    restart: unless-stopped
    depends_on:
      backend:
        condition: service_healthy
    ports:
      - "127.0.0.1:${PORT_FRONTEND}:3000"
    environment:
      # SSR fetches go inside the docker network; clients hit https://${APP_DOMAIN}
      API_INTERNAL_URL: http://backend:8080
      ORIGIN: https://${APP_DOMAIN}
    networks:
      - archive-net
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:3000/login >/dev/null 2>&1 || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 10
      start_period: 20s