Compare commits
194 Commits
2e6cc346ab
...
feat/issue
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3438260090 | ||
|
|
0bd00a3044 | ||
|
|
d301825e50 | ||
|
|
6193e28587 | ||
|
|
bfdf64975c | ||
|
|
ea800e5e2a | ||
|
|
cfff594732 | ||
|
|
0fa330a357 | ||
|
|
a6c85e3658 | ||
|
|
e0aca0f883 | ||
|
|
a77b0c1221 | ||
|
|
393a3c25fd | ||
|
|
8c7a2741b0 | ||
|
|
865c6ed796 | ||
|
|
14542b6e33 | ||
|
|
de7053644b | ||
|
|
f1e0b92f47 | ||
|
|
bead6f1811 | ||
|
|
7769dbc9f4 | ||
|
|
74ca5ee35f | ||
|
|
38973a014e | ||
|
|
fc8b4b164b | ||
|
|
eb63df2000 | ||
|
|
53bd574660 | ||
|
|
581ba01d8d | ||
|
|
9db42d6cc1 | ||
|
|
ab24786d2a | ||
|
|
1aca4c4a41 | ||
|
|
669eaa7c65 | ||
|
|
f15ea031d1 | ||
|
|
25a39fca9c | ||
|
|
e398133907 | ||
|
|
186535f8c9 | ||
|
|
de19d17b00 | ||
|
|
b2e31c3c1b | ||
|
|
9e23620072 | ||
|
|
af42113fca | ||
|
|
c779ec59f9 | ||
|
|
2023ea2931 | ||
|
|
59b18039ed | ||
|
|
96ea7e6815 | ||
|
|
dff81f7bfb | ||
|
|
a9c82ec481 | ||
|
|
97aa372094 | ||
|
|
e61409773e | ||
|
|
7713a03cd5 | ||
|
|
cea94ce260 | ||
|
|
45a992f5a8 | ||
|
|
bd57310bbf | ||
|
|
c2d092f435 | ||
|
|
e19bd60984 | ||
|
|
2aa0ff9e70 | ||
|
|
5dd74df293 | ||
|
|
7712180f3a | ||
|
|
c9a22945c8 | ||
|
|
9d84ebc4fe | ||
|
|
58b9204395 | ||
|
|
0d662f3a5e | ||
|
|
2e864e5b81 | ||
|
|
40d9713b79 | ||
|
|
68d07fe961 | ||
|
|
6145a25fe2 | ||
|
|
c43f45a472 | ||
|
|
134f1e2ae0 | ||
|
|
55ccd5f3c0 | ||
| 3658733003 | |||
| 0bb0a314ad | |||
| b194b565f6 | |||
|
|
6720a5aeb2 | ||
|
|
a7f60ebed8 | ||
|
|
25062be657 | ||
|
|
9662ff5f8c | ||
|
|
f5c7be932b | ||
|
|
dec0001bd1 | ||
|
|
f628ab6435 | ||
|
|
4c5ee96e36 | ||
|
|
53cf1837b2 | ||
|
|
d83ed7254d | ||
|
|
1ae4bfe325 | ||
|
|
c5139851b8 | ||
|
|
f9baf02b86 | ||
|
|
b67bd201b2 | ||
|
|
79735e23e0 | ||
|
|
df37113d38 | ||
|
|
c7d2eeb3f0 | ||
|
|
4e94d85d7e | ||
|
|
dec6b8139b | ||
|
|
7b7d0c92a8 | ||
|
|
448c3cdcdb | ||
|
|
7e52494880 | ||
|
|
1181b97f94 | ||
|
|
458968ded5 | ||
|
|
23515b8542 | ||
|
|
e4ac5f08e7 | ||
|
|
15ef079eff | ||
|
|
56c3e51657 | ||
|
|
2cc8b1174b | ||
|
|
1fc47888d5 | ||
|
|
d435b2b0e4 | ||
|
|
fed427dc4a | ||
|
|
cf78ab2f8e | ||
|
|
c8883d0e40 | ||
|
|
7154092547 | ||
|
|
ada3a3ccaf | ||
|
|
8cf3a2a726 | ||
|
|
553e2f8898 | ||
|
|
4a7349543a | ||
|
|
f15e004645 | ||
|
|
b137e3e72d | ||
|
|
4c8a23ff14 | ||
|
|
d7d225af77 | ||
|
|
4358997482 | ||
|
|
7c2e75facc | ||
|
|
7b05b9d5a0 | ||
|
|
20edc0474c | ||
|
|
fa191b5c05 | ||
|
|
2139d600f5 | ||
|
|
68e4ff4121 | ||
|
|
0a1d709c5f | ||
|
|
8a00d66435 | ||
| d2ad623bb8 | |||
|
|
00a8731cdd | ||
|
|
b4e6e4ca2a | ||
| 427c3ea537 | |||
|
|
67004737f6 | ||
|
|
3ced565aa2 | ||
|
|
cd715029eb | ||
| 84f9bbadeb | |||
|
|
457c1d3aee | ||
|
|
c99321e5cf | ||
|
|
f3f8345b03 | ||
| c3b477c609 | |||
|
|
3a67f7820e | ||
|
|
6ce6122384 | ||
|
|
b3e49a9504 | ||
| 2eff1ab14c | |||
|
|
de08ffe989 | ||
| 5ed24cb6eb | |||
|
|
c1406a32f1 | ||
|
|
22e1b25398 | ||
| 6a118589c2 | |||
|
|
0c66f6298b | ||
|
|
0c9973fdff | ||
| 52508e9dea | |||
|
|
cf8d22d81b | ||
|
|
1d42be9882 | ||
|
|
33c738db3b | ||
|
|
62c807b7fe | ||
|
|
82f0f7b82c | ||
|
|
4994d28a20 | ||
|
|
15d91da174 | ||
|
|
ae6d7a5467 | ||
|
|
24a398a0d8 | ||
|
|
e2632a556d | ||
|
|
be741ff9a2 | ||
|
|
4995c3139e | ||
|
|
0a5d4fb950 | ||
|
|
e4303baa40 | ||
|
|
46c8d4553b | ||
|
|
3fc0ec95ef | ||
|
|
510fa5e398 | ||
|
|
75453bed51 | ||
|
|
78e3acaeb7 | ||
|
|
0f4c844002 | ||
|
|
4dba268a04 | ||
|
|
b0cf35cf06 | ||
|
|
0d934a1b44 | ||
|
|
f4bda546a0 | ||
|
|
b7744667f2 | ||
|
|
3d36c26226 | ||
|
|
375fd3893c | ||
|
|
c5d482bead | ||
|
|
31eacb6d06 | ||
|
|
636900110a | ||
|
|
d78ee4397b | ||
|
|
ebdb36b7d0 | ||
|
|
93ff6cfb67 | ||
|
|
ed4c4a52eb | ||
|
|
2ca8428be4 | ||
|
|
6fffc06c28 | ||
|
|
ffcb901376 | ||
|
|
30469e74c9 | ||
|
|
5646e739c2 | ||
|
|
bbbdf8cd09 | ||
|
|
f727429699 | ||
|
|
e268e2dbca | ||
|
|
3de0d2f0fe | ||
|
|
0abbc147e2 | ||
|
|
6210480952 | ||
|
|
e17f4110f1 | ||
|
|
fa46492759 | ||
|
|
3965541879 | ||
|
|
582191d014 | ||
|
|
118100e58d |
40
.env.example
40
.env.example
@@ -26,6 +26,46 @@ PORT_MAILPIT_SMTP=1025
|
||||
# Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
|
||||
OCR_TRAINING_TOKEN=change-me-in-production
|
||||
|
||||
# --- Observability ---
|
||||
# Optional stack — start with: docker compose -f docker-compose.observability.yml up -d
|
||||
# Requires the main stack to already be running (docker compose up -d creates archiv-net).
|
||||
# In production the stack is managed from /opt/familienarchiv/ (see docs/DEPLOYMENT.md §4).
|
||||
|
||||
# Ports for host access
|
||||
PORT_GRAFANA=3003
|
||||
PORT_GLITCHTIP=3002
|
||||
PORT_PROMETHEUS=9090
|
||||
|
||||
# Grafana admin password — change this before exposing Grafana beyond localhost
|
||||
GRAFANA_ADMIN_PASSWORD=changeme
|
||||
|
||||
# GlitchTip domain — production: use https://glitchtip.archiv.raddatz.cloud (must match Caddy vhost)
|
||||
GLITCHTIP_DOMAIN=http://localhost:3002
|
||||
|
||||
# GlitchTip secret key — Django SECRET_KEY equivalent, used to sign sessions and tokens.
|
||||
# REQUIRED in production — must not be empty or 'changeme'. Fail-closed: GlitchTip will
|
||||
# refuse to start with an invalid key.
|
||||
# Generate with: python3 -c "import secrets; print(secrets.token_hex(50))"
|
||||
GLITCHTIP_SECRET_KEY=changeme-generate-a-real-secret
|
||||
|
||||
# PostgreSQL hostname for GlitchTip's db-init job and workers.
|
||||
# Override when only the staging stack is running (container name differs from archive-db).
|
||||
# Default (archive-db) is correct for production with the full stack up.
|
||||
POSTGRES_HOST=archive-db
|
||||
|
||||
# $$ escaping note: passwords in /opt/familienarchiv/.env that contain a literal '$' must
|
||||
# use '$$' so Docker Compose does not expand them as variable references.
|
||||
# Example: a password 'p@$$word' should be written as 'p@$$$$word' in the .env file.
|
||||
|
||||
# Error reporting DSNs — leave empty to disable the SDK (safe default).
|
||||
# SENTRY_DSN: backend (Spring Boot) — used by the GlitchTip/Sentry Java SDK
|
||||
SENTRY_DSN=
|
||||
SENTRY_TRACES_SAMPLE_RATE=
|
||||
# VITE_SENTRY_DSN: frontend (SvelteKit) — injected at build time via Vite
|
||||
VITE_SENTRY_DSN=
|
||||
# Sentry/GlitchTip auth token for source map upload at build time (optional)
|
||||
SENTRY_AUTH_TOKEN=
|
||||
|
||||
# Production SMTP — uncomment and fill in to send real emails instead of catching them
|
||||
# APP_BASE_URL=https://your-domain.example.com
|
||||
# MAIL_HOST=smtp.example.com
|
||||
|
||||
@@ -2,6 +2,7 @@ name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
@@ -32,6 +33,10 @@ jobs:
|
||||
run: npx @inlang/paraglide-js compile --project ./project.inlang --outdir ./src/lib/paraglide
|
||||
working-directory: frontend
|
||||
|
||||
- name: Sync SvelteKit
|
||||
run: npx svelte-kit sync
|
||||
working-directory: frontend
|
||||
|
||||
- name: Lint
|
||||
run: npm run lint
|
||||
working-directory: frontend
|
||||
@@ -56,6 +61,26 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Assert no (upload|download)-artifact past v3
|
||||
shell: bash
|
||||
run: |
|
||||
# Self-test: verify the regex catches v4+ and does not catch v3.
|
||||
tmp=$(mktemp)
|
||||
printf ' uses: actions/upload-artifact@v5\n' > "$tmp"
|
||||
grep -qP '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' "$tmp" \
|
||||
|| { echo "FAIL: guard self-test — regex missed upload-artifact@v5"; rm "$tmp"; exit 1; }
|
||||
printf ' uses: actions/upload-artifact@v3\n' > "$tmp"
|
||||
grep -qvP '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' "$tmp" \
|
||||
|| { echo "FAIL: guard self-test — regex incorrectly flagged upload-artifact@v3"; rm "$tmp"; exit 1; }
|
||||
rm "$tmp"
|
||||
# Guard: Gitea Actions (act_runner) does not implement the v4 artifact protocol.
|
||||
# Both upload-artifact and download-artifact share the same incompatibility.
|
||||
# Pin to @v3. See ADR-014 / #557.
|
||||
if grep -RPn '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' .gitea/workflows/; then
|
||||
echo "::error::actions/(upload|download)-artifact@v4+ is unsupported on Gitea Actions (act_runner). Pin to @v3. See ADR-014 / #557."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Run unit and component tests with coverage
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -77,9 +102,10 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
|
||||
- name: Upload coverage reports
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: coverage-reports
|
||||
path: |
|
||||
@@ -113,9 +139,10 @@ jobs:
|
||||
|| { echo "FAIL: /hilfe/transkription.html missing from prerender output"; exit 1; }
|
||||
echo "PASS: only /hilfe/transkription.html prerendered."
|
||||
|
||||
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
|
||||
- name: Upload screenshots
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: unit-test-screenshots
|
||||
path: frontend/test-results/screenshots/
|
||||
@@ -167,9 +194,17 @@ jobs:
|
||||
- name: Run backend tests
|
||||
run: |
|
||||
chmod +x mvnw
|
||||
./mvnw clean test
|
||||
./mvnw clean verify
|
||||
working-directory: backend
|
||||
|
||||
- name: Upload surefire reports
|
||||
if: always()
|
||||
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: surefire-reports
|
||||
path: backend/target/surefire-reports/
|
||||
|
||||
# ─── fail2ban Regex Regression ────────────────────────────────────────────────
|
||||
# The filter parses Caddy's JSON access log; a Caddy upgrade that reorders
|
||||
# the JSON keys would silently break it (fail2ban-regex would return
|
||||
@@ -241,6 +276,27 @@ jobs:
|
||||
echo "$dump" | grep -qE "\['add', 'familienarchiv-auth', 'polling'\]" \
|
||||
|| { echo "FAIL: familienarchiv-auth jail did not resolve to 'polling' backend"; exit 1; }
|
||||
|
||||
# ─── Semgrep Security Scan ───────────────────────────────────────────────────
|
||||
# Catches XXE-unprotected XML parser factories and similar patterns defined in
|
||||
# .semgrep/security.yml. Runs in parallel with backend-unit-tests for fast feedback.
|
||||
# Uses local rules only (no SEMGREP_APP_TOKEN / OIDC — act_runner does not support it).
|
||||
semgrep-scan:
|
||||
name: Semgrep Security Scan
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install Semgrep
|
||||
run: pip install semgrep==1.163.0
|
||||
|
||||
- name: Run security rules
|
||||
run: semgrep --config .semgrep/security.yml --error --metrics=off backend/src/
|
||||
|
||||
# ─── Compose Bucket-Bootstrap Idempotency ─────────────────────────────────────
|
||||
# docker-compose.prod.yml's create-buckets service runs on every
|
||||
# `docker compose up` (one-shot, no restart). Must be idempotent — a
|
||||
@@ -269,6 +325,8 @@ jobs:
|
||||
MAIL_HOST=mailpit
|
||||
MAIL_PORT=1025
|
||||
APP_MAIL_FROM=noreply@local
|
||||
IMPORT_HOST_DIR=/tmp/dummy-import
|
||||
COMPOSE_NETWORK_NAME=test-idem-archiv-net
|
||||
EOF
|
||||
|
||||
- name: Bring up minio
|
||||
|
||||
@@ -56,9 +56,10 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
|
||||
- name: Upload coverage log on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: coverage-log-run-${{ matrix.run }}
|
||||
path: /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log
|
||||
|
||||
@@ -30,6 +30,9 @@ name: nightly
|
||||
# STAGING_OCR_TRAINING_TOKEN
|
||||
# STAGING_APP_ADMIN_USERNAME
|
||||
# STAGING_APP_ADMIN_PASSWORD
|
||||
# GRAFANA_ADMIN_PASSWORD
|
||||
# GLITCHTIP_SECRET_KEY
|
||||
# SENTRY_DSN (set after GlitchTip first-run; empty = Sentry disabled)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
@@ -74,6 +77,8 @@ jobs:
|
||||
MAIL_STARTTLS_ENABLE=false
|
||||
APP_MAIL_FROM=noreply@staging.raddatz.cloud
|
||||
IMPORT_HOST_DIR=/srv/familienarchiv-staging/import
|
||||
POSTGRES_USER=archiv
|
||||
SENTRY_DSN=${{ secrets.SENTRY_DSN }}
|
||||
EOF
|
||||
|
||||
- name: Verify backend /import:ro mount is wired
|
||||
@@ -120,6 +125,77 @@ jobs:
|
||||
--profile staging \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Deploy observability configs
|
||||
# Copies the compose file and config tree from the workspace checkout
|
||||
# into /opt/familienarchiv/ — the permanent location that persists
|
||||
# between CI runs. Containers started in the next step bind-mount
|
||||
# from there, so a future workspace wipe cannot corrupt a running
|
||||
# config file.
|
||||
#
|
||||
# obs-secrets.env is written fresh from Gitea secrets on every run so
|
||||
# Gitea is always the single source of truth for secret rotation.
|
||||
# Non-secret config lives in infra/observability/obs.env (tracked in git).
|
||||
run: |
|
||||
rm -rf /opt/familienarchiv/infra/observability
|
||||
mkdir -p /opt/familienarchiv/infra/observability
|
||||
cp -r infra/observability/. /opt/familienarchiv/infra/observability/
|
||||
cp docker-compose.observability.yml /opt/familienarchiv/
|
||||
cat > /opt/familienarchiv/obs-secrets.env <<'EOF'
|
||||
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
|
||||
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
|
||||
POSTGRES_PASSWORD=${{ secrets.STAGING_POSTGRES_PASSWORD }}
|
||||
POSTGRES_HOST=archiv-staging-db-1
|
||||
EOF
|
||||
# Note: POSTGRES_HOST is derived from the Compose project name (archiv-staging)
|
||||
# and service name (db). A project rename requires updating this value.
|
||||
chmod 600 /opt/familienarchiv/obs-secrets.env
|
||||
|
||||
- name: Validate observability compose config
|
||||
# Dry-run: resolves all variable substitutions and reports any missing
|
||||
# required keys before containers start. Catches undefined variables and
|
||||
# YAML errors in config files updated by the previous step.
|
||||
# --env-file order: obs.env first (git-tracked defaults), obs-secrets.env
|
||||
# second (CI-written secrets). Later files win on duplicate keys, so
|
||||
# obs-secrets.env overrides POSTGRES_HOST set in obs.env.
|
||||
run: |
|
||||
docker compose \
|
||||
-f /opt/familienarchiv/docker-compose.observability.yml \
|
||||
--env-file /opt/familienarchiv/infra/observability/obs.env \
|
||||
--env-file /opt/familienarchiv/obs-secrets.env \
|
||||
config --quiet
|
||||
|
||||
- name: Start observability stack
|
||||
# Runs with absolute paths so bind mounts resolve to stable host paths
|
||||
# that survive workspace wipes between nightly runs (see ADR-016).
|
||||
# Non-secret config from obs.env (git-tracked); secrets from obs-secrets.env
|
||||
# (written fresh from Gitea secrets above). --env-file order: obs.env first,
|
||||
# obs-secrets.env second — later file wins on duplicate keys.
|
||||
run: |
|
||||
docker compose \
|
||||
-f /opt/familienarchiv/docker-compose.observability.yml \
|
||||
--env-file /opt/familienarchiv/infra/observability/obs.env \
|
||||
--env-file /opt/familienarchiv/obs-secrets.env \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Assert observability stack health
|
||||
# docker compose up --wait covers services WITH healthcheck directives only.
|
||||
# obs-promtail, obs-cadvisor, obs-node-exporter, and obs-glitchtip-worker have
|
||||
# no healthcheck — they are considered "started" as soon as the process runs.
|
||||
# This step explicitly asserts the five healthchecked critical services are
|
||||
# healthy before the smoke test proceeds.
|
||||
run: |
|
||||
set -e
|
||||
unhealthy=""
|
||||
for svc in obs-loki obs-prometheus obs-grafana obs-tempo obs-glitchtip; do
|
||||
status=$(docker inspect "$svc" --format '{{.State.Health.Status}}' 2>/dev/null || echo "missing")
|
||||
if [ "$status" != "healthy" ]; then
|
||||
echo "::error::$svc is not healthy (status: $status)"
|
||||
unhealthy="$unhealthy $svc"
|
||||
fi
|
||||
done
|
||||
[ -z "$unhealthy" ] || exit 1
|
||||
echo "All critical observability services are healthy"
|
||||
|
||||
- name: Reload Caddy
|
||||
# Apply any committed Caddyfile changes before smoke-testing the
|
||||
# public surface. Without this step, a Caddyfile edit lands in the
|
||||
|
||||
@@ -34,6 +34,9 @@ name: release
|
||||
# MAIL_PORT
|
||||
# MAIL_USERNAME
|
||||
# MAIL_PASSWORD
|
||||
# GRAFANA_ADMIN_PASSWORD
|
||||
# GLITCHTIP_SECRET_KEY
|
||||
# SENTRY_DSN (set after GlitchTip first-run; empty = Sentry disabled)
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -72,6 +75,8 @@ jobs:
|
||||
MAIL_STARTTLS_ENABLE=true
|
||||
APP_MAIL_FROM=noreply@raddatz.cloud
|
||||
IMPORT_HOST_DIR=/srv/familienarchiv-production/import
|
||||
POSTGRES_USER=archiv
|
||||
SENTRY_DSN=${{ secrets.SENTRY_DSN }}
|
||||
EOF
|
||||
|
||||
- name: Build images
|
||||
@@ -93,6 +98,75 @@ jobs:
|
||||
--env-file .env.production \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Deploy observability configs
|
||||
# Mirrors the nightly approach: copies obs compose file and config tree
|
||||
# to /opt/familienarchiv/ (permanent path, survives workspace wipes — ADR-016),
|
||||
# then writes obs-secrets.env fresh from Gitea secrets.
|
||||
# Non-secret config lives in infra/observability/obs.env (tracked in git).
|
||||
run: |
|
||||
rm -rf /opt/familienarchiv/infra/observability
|
||||
mkdir -p /opt/familienarchiv/infra/observability
|
||||
cp -r infra/observability/. /opt/familienarchiv/infra/observability/
|
||||
cp docker-compose.observability.yml /opt/familienarchiv/
|
||||
cat > /opt/familienarchiv/obs-secrets.env <<'EOF'
|
||||
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
|
||||
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
|
||||
POSTGRES_PASSWORD=${{ secrets.PROD_POSTGRES_PASSWORD }}
|
||||
POSTGRES_HOST=archiv-production-db-1
|
||||
EOF
|
||||
# Note: POSTGRES_HOST is derived from the Compose project name (archiv-production)
|
||||
# and service name (db). A project rename requires updating this value.
|
||||
chmod 600 /opt/familienarchiv/obs-secrets.env
|
||||
|
||||
- name: Validate observability compose config
|
||||
# Dry-run: resolves all variable substitutions and reports any missing
|
||||
# required keys before containers start. Catches undefined variables and
|
||||
# YAML errors in config files updated by the previous step.
|
||||
# --env-file order: obs.env first (git-tracked defaults), obs-secrets.env
|
||||
# second (CI-written secrets). Later files win on duplicate keys, so
|
||||
# obs-secrets.env overrides POSTGRES_HOST set in obs.env.
|
||||
# Keep in sync with the equivalent step in nightly.yml (#603).
|
||||
run: |
|
||||
docker compose \
|
||||
-f /opt/familienarchiv/docker-compose.observability.yml \
|
||||
--env-file /opt/familienarchiv/infra/observability/obs.env \
|
||||
--env-file /opt/familienarchiv/obs-secrets.env \
|
||||
config --quiet
|
||||
|
||||
- name: Start observability stack
|
||||
# Runs with absolute paths so bind mounts resolve to stable host paths
|
||||
# that survive workspace wipes between runs (see ADR-016).
|
||||
# Non-secret config from obs.env (git-tracked); secrets from obs-secrets.env
|
||||
# (written fresh from Gitea secrets above). --env-file order: obs.env first,
|
||||
# obs-secrets.env second — later file wins on duplicate keys.
|
||||
# Keep in sync with the equivalent step in nightly.yml (#603).
|
||||
run: |
|
||||
docker compose \
|
||||
-f /opt/familienarchiv/docker-compose.observability.yml \
|
||||
--env-file /opt/familienarchiv/infra/observability/obs.env \
|
||||
--env-file /opt/familienarchiv/obs-secrets.env \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Assert observability stack health
|
||||
# docker compose up --wait covers services WITH healthcheck directives only.
|
||||
# obs-promtail, obs-cadvisor, obs-node-exporter, and obs-glitchtip-worker have
|
||||
# no healthcheck — they are considered "started" as soon as the process runs.
|
||||
# This step explicitly asserts the five healthchecked critical services are
|
||||
# healthy before the smoke test proceeds.
|
||||
# Keep in sync with the equivalent step in nightly.yml (#603).
|
||||
run: |
|
||||
set -e
|
||||
unhealthy=""
|
||||
for svc in obs-loki obs-prometheus obs-grafana obs-tempo obs-glitchtip; do
|
||||
status=$(docker inspect "$svc" --format '{{.State.Health.Status}}' 2>/dev/null || echo "missing")
|
||||
if [ "$status" != "healthy" ]; then
|
||||
echo "::error::$svc is not healthy (status: $status)"
|
||||
unhealthy="$unhealthy $svc"
|
||||
fi
|
||||
done
|
||||
[ -z "$unhealthy" ] || exit 1
|
||||
echo "All critical observability services are healthy"
|
||||
|
||||
- name: Reload Caddy
|
||||
# See nightly.yml — same rationale and mechanism: DooD job containers
|
||||
# cannot call systemctl directly; nsenter via a privileged sibling
|
||||
|
||||
54
.semgrep/security.yml
Normal file
54
.semgrep/security.yml
Normal file
@@ -0,0 +1,54 @@
|
||||
# Semgrep security rules for Familienarchiv backend.
|
||||
# These rules catch the absence of XXE protection on XML parser factories.
|
||||
# CWE-611: Improper Restriction of XML External Entity Reference.
|
||||
# Run: semgrep --config .semgrep/security.yml --error backend/src/
|
||||
|
||||
rules:
|
||||
|
||||
# DocumentBuilderFactory without XXE hardening.
|
||||
# All call sites must call setFeature("…disallow-doctype-decl", true) before use.
|
||||
- id: dbf-xxe-default
|
||||
patterns:
|
||||
- pattern: $X = DocumentBuilderFactory.newInstance();
|
||||
- pattern-not-inside: |
|
||||
...
|
||||
$X.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
|
||||
...
|
||||
message: >
|
||||
DocumentBuilderFactory without XXE protection (CWE-611).
|
||||
Call XxeSafeXmlParser.hardenedFactory() instead of DocumentBuilderFactory.newInstance().
|
||||
See: https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html
|
||||
languages: [java]
|
||||
severity: ERROR
|
||||
|
||||
# SAXParserFactory without XXE hardening.
|
||||
- id: sax-xxe-default
|
||||
patterns:
|
||||
- pattern: $X = SAXParserFactory.newInstance();
|
||||
- pattern-not-inside: |
|
||||
...
|
||||
$X.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
|
||||
...
|
||||
message: >
|
||||
SAXParserFactory without XXE protection (CWE-611).
|
||||
Set disallow-doctype-decl=true, external-general-entities=false, external-parameter-entities=false,
|
||||
and load-external-dtd=false before use. Follow the pattern in XxeSafeXmlParser.hardenedFactory().
|
||||
See: https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html
|
||||
languages: [java]
|
||||
severity: ERROR
|
||||
|
||||
# XMLInputFactory without XXE hardening (StAX parser).
|
||||
- id: stax-xxe-default
|
||||
patterns:
|
||||
- pattern: $X = XMLInputFactory.newInstance();
|
||||
- pattern-not-inside: |
|
||||
...
|
||||
$X.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
|
||||
...
|
||||
message: >
|
||||
XMLInputFactory without XXE protection (CWE-611).
|
||||
Set IS_SUPPORTING_EXTERNAL_ENTITIES=false and SUPPORT_DTD=false before use.
|
||||
Follow the pattern in XxeSafeXmlParser.hardenedFactory().
|
||||
See: https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html
|
||||
languages: [java]
|
||||
severity: ERROR
|
||||
31
CLAUDE.md
31
CLAUDE.md
@@ -159,7 +159,7 @@ Input DTOs live flat in the domain package. Response types are the model entitie
|
||||
|
||||
→ See [CONTRIBUTING.md §Error handling](./CONTRIBUTING.md#error-handling)
|
||||
|
||||
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) mirror in `frontend/src/lib/shared/errors.ts`, (3) add i18n keys in `messages/{de,en,es}.json`.
|
||||
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) add to `ErrorCode` type in `frontend/src/lib/shared/errors.ts`, (3) add a `case` in `getErrorMessage()`, (4) add i18n keys in `messages/{de,en,es}.json`.
|
||||
|
||||
### Security / Permissions
|
||||
|
||||
@@ -274,6 +274,35 @@ Back button pattern — use the shared `<BackButton>` component from `$lib/share
|
||||
|
||||
→ See [docs/DEPLOYMENT.md](./docs/DEPLOYMENT.md)
|
||||
|
||||
### Observability stack (separate compose file)
|
||||
|
||||
Run via `docker-compose.observability.yml` — requires the main stack to be running first. Full setup procedure: [docs/DEPLOYMENT.md §4](./docs/DEPLOYMENT.md#4-logs--observability).
|
||||
|
||||
| Service | Container | Default Port | Purpose |
|
||||
|---------|-----------|-------------|---------|
|
||||
| Grafana | `obs-grafana` | 3003 | Metrics / logs / traces dashboard |
|
||||
| Prometheus | `obs-prometheus` | 9090 (dev only — `127.0.0.1` bound) | Metrics store |
|
||||
| Loki | `obs-loki` | — (internal) | Log store |
|
||||
| Tempo | `obs-tempo` | — (internal) | Trace store |
|
||||
| GlitchTip | `obs-glitchtip` | 3002 | Error tracking (Sentry-compatible) |
|
||||
|
||||
### Observability env vars
|
||||
|
||||
| Variable | Purpose |
|
||||
|----------|---------|
|
||||
| `PORT_GRAFANA` | Host port for Grafana UI (default: `3003`) |
|
||||
| `PORT_GLITCHTIP` | Host port for GlitchTip UI (default: `3002`) |
|
||||
| `PORT_PROMETHEUS` | Host port for Prometheus UI (default: `9090`) |
|
||||
| `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` login password — generate with `openssl rand -hex 32` |
|
||||
| `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` |
|
||||
| `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (email links, CORS), e.g. `https://glitchtip.example.com` |
|
||||
| `SENTRY_DSN` | GlitchTip/Sentry DSN for the backend (Spring Boot) — leave empty to disable |
|
||||
| `VITE_SENTRY_DSN` | GlitchTip/Sentry DSN for the frontend (SvelteKit) — injected at build time via Vite |
|
||||
|
||||
## Observability
|
||||
|
||||
→ See [docs/OBSERVABILITY.md](./docs/OBSERVABILITY.md) — where to look for logs, traces, metrics, and errors.
|
||||
|
||||
## API Testing
|
||||
|
||||
HTTP test files are in `backend/api_tests/` for use with the VS Code REST Client extension.
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-parent</artifactId>
|
||||
<version>4.0.0</version>
|
||||
<version>4.0.6</version>
|
||||
<relativePath/> <!-- lookup parent from repository -->
|
||||
</parent>
|
||||
<groupId>org.raddatz</groupId>
|
||||
@@ -29,11 +29,30 @@
|
||||
<properties>
|
||||
<java.version>21</java.version>
|
||||
</properties>
|
||||
<dependencyManagement>
|
||||
<dependencies>
|
||||
<!-- opentelemetry-spring-boot-starter:2.27.0 was built against opentelemetry-api:1.61.0,
|
||||
but Spring Boot 4.0.0 BOM only manages 1.55.0 (missing GlobalOpenTelemetry.getOrNoop()).
|
||||
Import the core OTel BOM here to override it before the Spring Boot BOM applies. -->
|
||||
<dependency>
|
||||
<groupId>io.opentelemetry</groupId>
|
||||
<artifactId>opentelemetry-bom</artifactId>
|
||||
<version>1.61.0</version>
|
||||
<type>pom</type>
|
||||
<scope>import</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-actuator</artifactId>
|
||||
</dependency>
|
||||
<!-- Spring Boot 4.0 splits Micrometer metrics export (incl. Prometheus scrape endpoint) into its own starter -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-micrometer-metrics</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-validation</artifactId>
|
||||
@@ -50,6 +69,10 @@
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-security</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-session-jdbc</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-webmvc</artifactId>
|
||||
@@ -188,7 +211,7 @@
|
||||
<dependency>
|
||||
<groupId>com.googlecode.owasp-java-html-sanitizer</groupId>
|
||||
<artifactId>owasp-java-html-sanitizer</artifactId>
|
||||
<version>20240325.1</version>
|
||||
<version>20260101.1</version>
|
||||
</dependency>
|
||||
|
||||
<!-- HTML → plain-text extraction for comment previews -->
|
||||
@@ -197,6 +220,42 @@
|
||||
<artifactId>jsoup</artifactId>
|
||||
<version>1.18.1</version>
|
||||
</dependency>
|
||||
|
||||
<!-- Observability: Prometheus metrics scrape endpoint (version managed by Spring Boot BOM) -->
|
||||
<dependency>
|
||||
<groupId>io.micrometer</groupId>
|
||||
<artifactId>micrometer-registry-prometheus</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Observability: Micrometer → OpenTelemetry tracing bridge (version managed by Spring Boot BOM) -->
|
||||
<dependency>
|
||||
<groupId>io.micrometer</groupId>
|
||||
<artifactId>micrometer-tracing-bridge-otel</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Observability: OTel Spring Boot auto-instrumentation — NOT in Spring Boot BOM, pinned explicitly -->
|
||||
<dependency>
|
||||
<groupId>io.opentelemetry.instrumentation</groupId>
|
||||
<artifactId>opentelemetry-spring-boot-starter</artifactId>
|
||||
<version>2.27.0</version>
|
||||
<exclusions>
|
||||
<!-- Excludes AzureAppServiceResourceProvider which references ServiceAttributes.SERVICE_INSTANCE_ID
|
||||
that does not exist in the semconv version pulled by this project. -->
|
||||
<exclusion>
|
||||
<groupId>io.opentelemetry.contrib</groupId>
|
||||
<artifactId>opentelemetry-azure-resources</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<!-- Sentry error reporting (GlitchTip-compatible) — sentry-spring-boot-4 is the
|
||||
Spring Boot 4 / Spring Framework 7 compatible module (replaces the jakarta starter
|
||||
which crashes with SF7 due to bean-name generation for triply-nested @Import classes) -->
|
||||
<dependency>
|
||||
<groupId>io.sentry</groupId>
|
||||
<artifactId>sentry-spring-boot-4</artifactId>
|
||||
<version>8.41.0</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
|
||||
@@ -242,7 +301,7 @@
|
||||
<phase>verify</phase>
|
||||
<goals><goal>report</goal></goals>
|
||||
</execution>
|
||||
<!-- Gate: baseline 89.4% overall / service 90.2% / controller 80.0% -->
|
||||
<!-- Gate: ratchet at 0.77 — actual measured coverage after drift; raise via #496 -->
|
||||
<execution>
|
||||
<id>check</id>
|
||||
<phase>verify</phase>
|
||||
@@ -255,7 +314,7 @@
|
||||
<limit>
|
||||
<counter>BRANCH</counter>
|
||||
<value>COVEREDRATIO</value>
|
||||
<minimum>0.88</minimum>
|
||||
<minimum>0.77</minimum>
|
||||
</limit>
|
||||
</limits>
|
||||
</rule>
|
||||
@@ -273,6 +332,16 @@
|
||||
</profiles>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<configuration>
|
||||
<forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
|
||||
<systemPropertyVariables>
|
||||
<junit.jupiter.execution.timeout.default>90 s</junit.jupiter.execution.timeout.default>
|
||||
</systemPropertyVariables>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
|
||||
@@ -35,7 +35,16 @@ public enum AuditKind {
|
||||
USER_DELETED,
|
||||
|
||||
/** Payload: {@code {"userId": "uuid", "email": "addr", "addedGroups": ["Admin"], "removedGroups": []}} */
|
||||
GROUP_MEMBERSHIP_CHANGED;
|
||||
GROUP_MEMBERSHIP_CHANGED,
|
||||
|
||||
/** Payload: {@code {"userId": "uuid", "ip": "1.2.3.4", "ua": "Mozilla/5.0..."}} */
|
||||
LOGIN_SUCCESS,
|
||||
|
||||
/** Payload: {@code {"email": "addr", "ip": "1.2.3.4", "ua": "Mozilla/5.0..."}} — password NEVER included */
|
||||
LOGIN_FAILED,
|
||||
|
||||
/** Payload: {@code {"userId": "uuid", "ip": "1.2.3.4", "ua": "Mozilla/5.0..."}} */
|
||||
LOGOUT;
|
||||
|
||||
public static final Set<AuditKind> ROLLUP_ELIGIBLE = Set.of(
|
||||
TEXT_SAVED, FILE_UPLOADED, ANNOTATION_CREATED,
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
package org.raddatz.familienarchiv.auth;
|
||||
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.raddatz.familienarchiv.audit.AuditKind;
|
||||
import org.raddatz.familienarchiv.audit.AuditService;
|
||||
import org.raddatz.familienarchiv.exception.DomainException;
|
||||
import org.raddatz.familienarchiv.user.AppUser;
|
||||
import org.raddatz.familienarchiv.user.UserService;
|
||||
import org.springframework.security.authentication.AuthenticationManager;
|
||||
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.security.core.AuthenticationException;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
@Slf4j
|
||||
public class AuthService {
|
||||
|
||||
private final AuthenticationManager authenticationManager;
|
||||
private final UserService userService;
|
||||
private final AuditService auditService;
|
||||
|
||||
/**
|
||||
* Validates credentials and returns the authenticated user plus the Spring Security
|
||||
* Authentication object. The caller is responsible for persisting the Authentication
|
||||
* to the session via SecurityContextRepository.
|
||||
*/
|
||||
public LoginResult login(String email, String password, String ip, String ua) {
|
||||
try {
|
||||
Authentication auth = authenticationManager.authenticate(
|
||||
new UsernamePasswordAuthenticationToken(email, password));
|
||||
|
||||
AppUser user = userService.findByEmail(email);
|
||||
auditService.log(AuditKind.LOGIN_SUCCESS, user.getId(), null, Map.of(
|
||||
"userId", user.getId().toString(),
|
||||
"ip", ip,
|
||||
"ua", truncateUa(ua)));
|
||||
return new LoginResult(user, auth);
|
||||
} catch (AuthenticationException ex) {
|
||||
// Audit login failure — intentionally does NOT log the attempted password.
|
||||
// DaoAuthenticationProvider already runs a dummy BCrypt on unknown users to
|
||||
// equalise timing between "user not found" and "wrong password" paths.
|
||||
auditService.log(AuditKind.LOGIN_FAILED, null, null, Map.of(
|
||||
"email", email,
|
||||
"ip", ip,
|
||||
"ua", truncateUa(ua)));
|
||||
throw DomainException.invalidCredentials();
|
||||
}
|
||||
}
|
||||
|
||||
public void logout(String email, String ip, String ua) {
|
||||
AppUser user = userService.findByEmail(email);
|
||||
auditService.log(AuditKind.LOGOUT, user.getId(), null, Map.of(
|
||||
"userId", user.getId().toString(),
|
||||
"ip", ip,
|
||||
"ua", truncateUa(ua)));
|
||||
}
|
||||
|
||||
private static String truncateUa(String ua) {
|
||||
if (ua == null) return "";
|
||||
return ua.length() > 200 ? ua.substring(0, 200) : ua;
|
||||
}
|
||||
|
||||
public record LoginResult(AppUser user, Authentication authentication) {}
|
||||
}
|
||||
@@ -0,0 +1,73 @@
|
||||
package org.raddatz.familienarchiv.auth;
|
||||
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import jakarta.servlet.http.HttpSession;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.raddatz.familienarchiv.user.AppUser;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.security.core.context.SecurityContext;
|
||||
import org.springframework.security.core.context.SecurityContextHolder;
|
||||
import org.springframework.security.web.context.HttpSessionSecurityContextRepository;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
// @RequirePermission is intentionally absent: login is unauthenticated by design;
|
||||
// logout requires an authenticated session (enforced by SecurityConfig), not a specific permission.
|
||||
@RestController
|
||||
@RequestMapping("/api/auth")
|
||||
@RequiredArgsConstructor
|
||||
public class AuthSessionController {
|
||||
|
||||
private final AuthService authService;
|
||||
|
||||
@PostMapping("/login")
|
||||
public ResponseEntity<AppUser> login(
|
||||
@RequestBody LoginRequest request,
|
||||
HttpServletRequest httpRequest) {
|
||||
|
||||
String ip = resolveClientIp(httpRequest);
|
||||
String ua = resolveUserAgent(httpRequest);
|
||||
|
||||
AuthService.LoginResult result = authService.login(request.email(), request.password(), ip, ua);
|
||||
|
||||
// Establish server-side session. Spring Session JDBC intercepts getSession().setAttribute()
|
||||
// and persists the record; the Set-Cookie: fa_session=<opaque-id> is added automatically.
|
||||
SecurityContext context = SecurityContextHolder.createEmptyContext();
|
||||
context.setAuthentication(result.authentication());
|
||||
SecurityContextHolder.setContext(context);
|
||||
httpRequest.getSession(true)
|
||||
.setAttribute(HttpSessionSecurityContextRepository.SPRING_SECURITY_CONTEXT_KEY, context);
|
||||
|
||||
return ResponseEntity.ok(result.user());
|
||||
}
|
||||
|
||||
@PostMapping("/logout")
|
||||
public ResponseEntity<Void> logout(Authentication authentication, HttpServletRequest httpRequest) {
|
||||
String ip = resolveClientIp(httpRequest);
|
||||
String ua = resolveUserAgent(httpRequest);
|
||||
|
||||
authService.logout(authentication.getName(), ip, ua);
|
||||
|
||||
HttpSession session = httpRequest.getSession(false);
|
||||
if (session != null) {
|
||||
session.invalidate();
|
||||
}
|
||||
SecurityContextHolder.clearContext();
|
||||
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
private static String resolveClientIp(HttpServletRequest request) {
|
||||
String forwarded = request.getHeader("X-Forwarded-For");
|
||||
if (forwarded != null && !forwarded.isBlank()) {
|
||||
return forwarded.split(",")[0].trim();
|
||||
}
|
||||
return request.getRemoteAddr();
|
||||
}
|
||||
|
||||
private static String resolveUserAgent(HttpServletRequest request) {
|
||||
String ua = request.getHeader("User-Agent");
|
||||
return ua != null ? ua : "";
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
package org.raddatz.familienarchiv.auth;
|
||||
|
||||
public record LoginRequest(String email, String password) {}
|
||||
@@ -0,0 +1,22 @@
|
||||
package org.raddatz.familienarchiv.config;
|
||||
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.session.web.http.CookieSerializer;
|
||||
import org.springframework.session.web.http.DefaultCookieSerializer;
|
||||
|
||||
@Configuration
|
||||
public class SpringSessionConfig {
|
||||
|
||||
@Bean
|
||||
public CookieSerializer cookieSerializer() {
|
||||
DefaultCookieSerializer serializer = new DefaultCookieSerializer();
|
||||
serializer.setCookieName("fa_session");
|
||||
serializer.setSameSite("Strict");
|
||||
// cookieHttpOnly: true is the DefaultCookieSerializer default
|
||||
// useSecureCookie not set: auto-detects from request.isSecure().
|
||||
// With forward-headers-strategy: native, Caddy's X-Forwarded-Proto: https
|
||||
// causes isSecure() → true in production; direct HTTP in dev/tests → false.
|
||||
return serializer;
|
||||
}
|
||||
}
|
||||
@@ -39,6 +39,11 @@ public class DomainException extends RuntimeException {
|
||||
return new DomainException(ErrorCode.UNAUTHORIZED, HttpStatus.UNAUTHORIZED, message);
|
||||
}
|
||||
|
||||
public static DomainException invalidCredentials() {
|
||||
return new DomainException(ErrorCode.INVALID_CREDENTIALS, HttpStatus.UNAUTHORIZED,
|
||||
"Invalid email or password");
|
||||
}
|
||||
|
||||
public static DomainException conflict(ErrorCode code, String message) {
|
||||
return new DomainException(code, HttpStatus.CONFLICT, message);
|
||||
}
|
||||
|
||||
@@ -30,6 +30,8 @@ public enum ErrorCode {
|
||||
// --- Users ---
|
||||
/** A user with the given ID or username does not exist. 404 */
|
||||
USER_NOT_FOUND,
|
||||
/** A group with the given ID does not exist. 404 */
|
||||
GROUP_NOT_FOUND,
|
||||
/** The supplied email address is already used by another account. 409 */
|
||||
EMAIL_ALREADY_IN_USE,
|
||||
/** The supplied current password does not match the stored hash. 400 */
|
||||
@@ -52,12 +54,18 @@ public enum ErrorCode {
|
||||
INVITE_REVOKED,
|
||||
/** The invite has passed its expiry date. 410 */
|
||||
INVITE_EXPIRED,
|
||||
/** A group cannot be deleted because one or more active invites reference it. 409 */
|
||||
GROUP_HAS_ACTIVE_INVITES,
|
||||
|
||||
// --- Auth ---
|
||||
/** The request is not authenticated. 401 */
|
||||
UNAUTHORIZED,
|
||||
/** The authenticated user lacks the required permission. 403 */
|
||||
FORBIDDEN,
|
||||
/** The supplied email/password combination does not match any active account. 401 */
|
||||
INVALID_CREDENTIALS,
|
||||
/** The session has expired or been invalidated. 401 */
|
||||
SESSION_EXPIRED,
|
||||
/** The password-reset token is missing, expired, or already used. 400 */
|
||||
INVALID_RESET_TOKEN,
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package org.raddatz.familienarchiv.exception;
|
||||
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import io.sentry.Sentry;
|
||||
import jakarta.validation.ConstraintViolationException;
|
||||
import org.raddatz.familienarchiv.exception.DomainException;
|
||||
import org.raddatz.familienarchiv.exception.ErrorCode;
|
||||
@@ -63,6 +64,7 @@ public class GlobalExceptionHandler {
|
||||
|
||||
@ExceptionHandler(Exception.class)
|
||||
public ResponseEntity<ErrorResponse> handleGeneric(Exception ex) {
|
||||
Sentry.captureException(ex);
|
||||
log.error("Unhandled exception", ex);
|
||||
return ResponseEntity.internalServerError()
|
||||
.body(new ErrorResponse(ErrorCode.INTERNAL_ERROR, "An unexpected error occurred"));
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
package org.raddatz.familienarchiv.importing;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.poi.ss.usermodel.*;
|
||||
@@ -52,9 +53,9 @@ public class MassImportService {
|
||||
|
||||
public enum State { IDLE, RUNNING, DONE, FAILED }
|
||||
|
||||
public record ImportStatus(State state, String message, int processed, LocalDateTime startedAt) {}
|
||||
public record ImportStatus(State state, String statusCode, @JsonIgnore String message, int processed, LocalDateTime startedAt) {}
|
||||
|
||||
private volatile ImportStatus currentStatus = new ImportStatus(State.IDLE, "Kein Import gestartet.", 0, null);
|
||||
private volatile ImportStatus currentStatus = new ImportStatus(State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
|
||||
|
||||
public ImportStatus getStatus() {
|
||||
return currentStatus;
|
||||
@@ -116,20 +117,29 @@ public class MassImportService {
|
||||
if (currentStatus.state() == State.RUNNING) {
|
||||
throw DomainException.conflict(ErrorCode.IMPORT_ALREADY_RUNNING, "A mass import is already in progress");
|
||||
}
|
||||
currentStatus = new ImportStatus(State.RUNNING, "Import läuft...", 0, LocalDateTime.now());
|
||||
currentStatus = new ImportStatus(State.RUNNING, "IMPORT_RUNNING", "Import läuft...", 0, LocalDateTime.now());
|
||||
try {
|
||||
File spreadsheet = findSpreadsheetFile();
|
||||
log.info("Starte Massenimport aus: {}", spreadsheet.getAbsolutePath());
|
||||
int processed = processRows(readSpreadsheet(spreadsheet));
|
||||
currentStatus = new ImportStatus(State.DONE,
|
||||
currentStatus = new ImportStatus(State.DONE, "IMPORT_DONE",
|
||||
"Import abgeschlossen. " + processed + " Dokumente verarbeitet.",
|
||||
processed, currentStatus.startedAt());
|
||||
} catch (NoSpreadsheetException e) {
|
||||
log.error("Massenimport fehlgeschlagen: keine Tabellendatei", e);
|
||||
currentStatus = new ImportStatus(State.FAILED, "IMPORT_FAILED_NO_SPREADSHEET",
|
||||
"Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
|
||||
} catch (Exception e) {
|
||||
log.error("Massenimport fehlgeschlagen", e);
|
||||
currentStatus = new ImportStatus(State.FAILED, "Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
|
||||
currentStatus = new ImportStatus(State.FAILED, "IMPORT_FAILED_INTERNAL",
|
||||
"Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
|
||||
}
|
||||
}
|
||||
|
||||
private static class NoSpreadsheetException extends RuntimeException {
|
||||
NoSpreadsheetException(String message) { super(message); }
|
||||
}
|
||||
|
||||
private File findSpreadsheetFile() throws IOException {
|
||||
try (Stream<Path> files = Files.list(Paths.get(importDir))) {
|
||||
return files
|
||||
@@ -138,7 +148,7 @@ public class MassImportService {
|
||||
return name.endsWith(".ods") || name.endsWith(".xlsx") || name.endsWith(".xls");
|
||||
})
|
||||
.findFirst()
|
||||
.orElseThrow(() -> new RuntimeException(
|
||||
.orElseThrow(() -> new NoSpreadsheetException(
|
||||
"Keine Tabellendatei (.ods/.xlsx/.xls) in " + importDir + " gefunden!"))
|
||||
.toFile();
|
||||
}
|
||||
@@ -158,14 +168,14 @@ public class MassImportService {
|
||||
* Reads an ODS file by parsing its content.xml directly (no extra library needed).
|
||||
* ODS is a ZIP archive; content.xml holds the spreadsheet data as XML.
|
||||
*/
|
||||
private List<List<String>> readOds(File file) throws Exception {
|
||||
List<List<String>> readOds(File file) throws Exception {
|
||||
List<List<String>> result = new ArrayList<>();
|
||||
|
||||
try (ZipFile zip = new ZipFile(file)) {
|
||||
var entry = zip.getEntry("content.xml");
|
||||
if (entry == null) throw new RuntimeException("Ungültige ODS-Datei: content.xml fehlt");
|
||||
|
||||
var factory = DocumentBuilderFactory.newInstance();
|
||||
var factory = XxeSafeXmlParser.hardenedFactory();
|
||||
factory.setNamespaceAware(true);
|
||||
var builder = factory.newDocumentBuilder();
|
||||
var doc = builder.parse(zip.getInputStream(entry));
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
package org.raddatz.familienarchiv.importing;
|
||||
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
|
||||
class XxeSafeXmlParser {
|
||||
|
||||
private XxeSafeXmlParser() {}
|
||||
|
||||
static DocumentBuilderFactory hardenedFactory() throws ParserConfigurationException {
|
||||
var factory = DocumentBuilderFactory.newInstance();
|
||||
factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
|
||||
factory.setFeature("http://xml.org/sax/features/external-general-entities", false);
|
||||
factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
|
||||
factory.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
|
||||
factory.setXIncludeAware(false);
|
||||
factory.setExpandEntityReferences(false);
|
||||
return factory;
|
||||
}
|
||||
}
|
||||
@@ -1,137 +0,0 @@
|
||||
package org.raddatz.familienarchiv.security;
|
||||
|
||||
import jakarta.servlet.FilterChain;
|
||||
import jakarta.servlet.ServletException;
|
||||
import jakarta.servlet.http.Cookie;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletRequestWrapper;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import org.springframework.core.annotation.Order;
|
||||
import org.springframework.http.HttpHeaders;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.web.filter.OncePerRequestFilter;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URLDecoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
|
||||
/**
|
||||
* Promotes the {@code auth_token} cookie to an {@code Authorization} header
|
||||
* so that browser-side requests to {@code /api/*} authenticate the same way
|
||||
* SSR fetches do.
|
||||
*
|
||||
* <p>The SvelteKit login action stores the full HTTP Basic header value
|
||||
* ({@code "Basic <base64>"}) in an HttpOnly cookie. SSR fetches from
|
||||
* {@code hooks.server.ts} read the cookie and pass it explicitly as the
|
||||
* {@code Authorization} header. In the dev environment, Vite's proxy does
|
||||
* the same on every {@code /api/*} request (see {@code vite.config.ts}).
|
||||
* In production, Caddy proxies {@code /api/*} straight to the backend and
|
||||
* does NOT translate the cookie — so client-side {@code fetch} and
|
||||
* {@code EventSource} calls reach the backend without auth, get
|
||||
* {@code 401 WWW-Authenticate: Basic}, and the browser pops a native dialog.
|
||||
*
|
||||
* <p>This filter closes that gap: if a request has an {@code auth_token}
|
||||
* cookie but no explicit {@code Authorization} header, promote the cookie
|
||||
* value (URL-decoded) into the header before Spring Security inspects it.
|
||||
* Explicit {@code Authorization} headers are preserved unchanged.
|
||||
*
|
||||
* <p>See #520. Filter runs at {@code Ordered.HIGHEST_PRECEDENCE} so it
|
||||
* mutates the request before any Spring Security filter sees it.
|
||||
*
|
||||
* <p><b>Scope:</b> only {@code /api/*} requests are touched. The
|
||||
* {@code /actuator/*} block in Caddy plus the open auth/reset paths in
|
||||
* {@link SecurityConfig} must NOT receive a promoted Authorization.
|
||||
*
|
||||
* <p><b>⚠ Log-leakage warning:</b> the wrapped request exposes the
|
||||
* Authorization header via {@code getHeaderNames}/{@code getHeaders}. Any
|
||||
* filter or interceptor that iterates request headers will see the live
|
||||
* Basic credential. Do NOT add a request-header logger downstream of this
|
||||
* filter without explicitly scrubbing the {@code Authorization} field.
|
||||
*/
|
||||
@Component
|
||||
@Order(org.springframework.core.Ordered.HIGHEST_PRECEDENCE)
|
||||
public class AuthTokenCookieFilter extends OncePerRequestFilter {
|
||||
|
||||
static final String COOKIE_NAME = "auth_token";
|
||||
static final String SCOPE_PREFIX = "/api/";
|
||||
|
||||
@Override
|
||||
protected void doFilterInternal(HttpServletRequest request,
|
||||
HttpServletResponse response,
|
||||
FilterChain chain) throws ServletException, IOException {
|
||||
// Scope: only /api/* needs cookie promotion. /actuator/health (open),
|
||||
// /api/auth/forgot-password (open), /login etc. don't.
|
||||
if (!request.getRequestURI().startsWith(SCOPE_PREFIX)) {
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
// An explicit Authorization header wins — this is the SSR fetch path
|
||||
// (hooks.server.ts builds the header itself).
|
||||
if (request.getHeader(HttpHeaders.AUTHORIZATION) != null) {
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
Cookie[] cookies = request.getCookies();
|
||||
if (cookies == null) {
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
for (Cookie c : cookies) {
|
||||
if (COOKIE_NAME.equals(c.getName()) && c.getValue() != null && !c.getValue().isBlank()) {
|
||||
String decoded;
|
||||
try {
|
||||
decoded = URLDecoder.decode(c.getValue(), StandardCharsets.UTF_8);
|
||||
} catch (IllegalArgumentException malformed) {
|
||||
// Malformed percent-encoding — refuse to forward a bogus
|
||||
// Authorization header. Spring Security will treat the
|
||||
// request as unauthenticated.
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
chain.doFilter(new AuthHeaderRequest(request, decoded), response);
|
||||
return;
|
||||
}
|
||||
}
|
||||
chain.doFilter(request, response);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds (or overrides) the {@code Authorization} header on a wrapped request.
|
||||
* All other headers pass through unchanged.
|
||||
*/
|
||||
static final class AuthHeaderRequest extends HttpServletRequestWrapper {
|
||||
private final String authorization;
|
||||
|
||||
AuthHeaderRequest(HttpServletRequest request, String authorization) {
|
||||
super(request);
|
||||
this.authorization = authorization;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHeader(String name) {
|
||||
if (HttpHeaders.AUTHORIZATION.equalsIgnoreCase(name)) {
|
||||
return authorization;
|
||||
}
|
||||
return super.getHeader(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration<String> getHeaders(String name) {
|
||||
if (HttpHeaders.AUTHORIZATION.equalsIgnoreCase(name)) {
|
||||
return Collections.enumeration(Collections.singletonList(authorization));
|
||||
}
|
||||
return super.getHeaders(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration<String> getHeaderNames() {
|
||||
Enumeration<String> base = super.getHeaderNames();
|
||||
java.util.Set<String> names = new java.util.LinkedHashSet<>();
|
||||
while (base.hasMoreElements()) names.add(base.nextElement());
|
||||
names.add(HttpHeaders.AUTHORIZATION);
|
||||
return Collections.enumeration(names);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,13 +3,17 @@ package org.raddatz.familienarchiv.security;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
|
||||
import org.raddatz.familienarchiv.user.CustomUserDetailsService;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.annotation.Order;
|
||||
import org.springframework.core.env.Environment;
|
||||
import org.springframework.security.authentication.AuthenticationManager;
|
||||
import org.springframework.security.authentication.dao.DaoAuthenticationProvider;
|
||||
import org.springframework.security.config.Customizer;
|
||||
import org.springframework.security.config.annotation.authentication.configuration.AuthenticationConfiguration;
|
||||
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
|
||||
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
|
||||
import org.springframework.security.config.annotation.web.configurers.AbstractHttpConfigurer;
|
||||
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
|
||||
import org.springframework.security.crypto.password.PasswordEncoder;
|
||||
import org.springframework.security.web.SecurityFilterChain;
|
||||
@@ -34,28 +38,51 @@ public class SecurityConfig {
|
||||
return authProvider;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AuthenticationManager authenticationManager(AuthenticationConfiguration config) throws Exception {
|
||||
return config.getAuthenticationManager();
|
||||
}
|
||||
|
||||
@Bean
|
||||
@Order(1)
|
||||
public SecurityFilterChain managementFilterChain(HttpSecurity http) throws Exception {
|
||||
http
|
||||
.securityMatcher("/actuator/**")
|
||||
.authorizeHttpRequests(auth -> {
|
||||
// Health and Prometheus are open — Docker health checks and Prometheus scraping need no credentials.
|
||||
auth.requestMatchers("/actuator/health", "/actuator/prometheus").permitAll();
|
||||
// All other actuator endpoints (metrics, info, env, heapdump…) require authentication.
|
||||
auth.anyRequest().authenticated();
|
||||
})
|
||||
// Explicitly return 401 for any unauthenticated actuator request.
|
||||
// Without this override, Spring Security's DelegatingAuthenticationEntryPoint
|
||||
// would redirect browser-like clients to the form-login page (302 → /login),
|
||||
// making it impossible to distinguish "not authenticated" from "not found" in tests.
|
||||
.exceptionHandling(ex -> ex.authenticationEntryPoint(
|
||||
(req, res, e) -> res.setStatus(HttpServletResponse.SC_UNAUTHORIZED)))
|
||||
.formLogin(AbstractHttpConfigurer::disable)
|
||||
.csrf(AbstractHttpConfigurer::disable);
|
||||
return http.build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
|
||||
http
|
||||
// CSRF is intentionally disabled. With the cookie-promotion model
|
||||
// (auth_token cookie → Authorization header via AuthTokenCookieFilter,
|
||||
// see #520), every authenticated request to /api/* now carries the
|
||||
// credential automatically once the cookie is set. The CSRF defence
|
||||
// for state-changing endpoints is therefore LOAD-BEARING on:
|
||||
// CSRF is intentionally disabled. The session model relies on:
|
||||
// 1. SameSite=Strict on the fa_session cookie — a cross-site POST from
|
||||
// evil.com cannot include the cookie.
|
||||
// 2. CORS — Spring's default rejects cross-origin requests with credentials
|
||||
// unless explicitly allowed (no allowedOrigins config).
|
||||
//
|
||||
// 1. SameSite=strict on the auth_token cookie (login/+page.server.ts).
|
||||
// A cross-site POST from evil.com cannot include the cookie.
|
||||
// 2. CORS — Spring's default rejects cross-origin requests with
|
||||
// credentials unless explicitly allowed (no allowedOrigins config).
|
||||
//
|
||||
// If either of those is ever weakened (e.g. cookie flipped to
|
||||
// SameSite=lax, CORS allowedOrigins expanded), CSRF protection
|
||||
// MUST be re-enabled here.
|
||||
// If either of those is ever weakened, CSRF protection MUST be re-enabled.
|
||||
// Re-enabling CSRF (CookieCsrfTokenRepository) is planned for Phase 2 (#524).
|
||||
.csrf(csrf -> csrf.disable())
|
||||
|
||||
.authorizeHttpRequests(auth -> {
|
||||
// Health endpoint must be open so CI/Docker health checks work without credentials
|
||||
auth.requestMatchers("/actuator/health").permitAll();
|
||||
// Actuator endpoints are governed by managementFilterChain (@Order(1)) above.
|
||||
auth.requestMatchers("/actuator/health", "/actuator/prometheus").permitAll();
|
||||
// Login is unauthenticated by definition
|
||||
auth.requestMatchers("/api/auth/login").permitAll();
|
||||
// Password reset endpoints are unauthenticated by nature
|
||||
auth.requestMatchers("/api/auth/forgot-password", "/api/auth/reset-password").permitAll();
|
||||
// Invite-based registration endpoints are public
|
||||
@@ -75,9 +102,10 @@ public class SecurityConfig {
|
||||
// erlaubt pdf im Iframe
|
||||
.headers(headers -> headers
|
||||
.frameOptions(frameOptions -> frameOptions.sameOrigin()))
|
||||
// Erlaubt Login via Browser-Popup oder REST-Header (Authorization: Basic ...)
|
||||
.httpBasic(Customizer.withDefaults())
|
||||
.formLogin(form -> form.usernameParameter("email"));
|
||||
// Return 401 (not 302 redirect to /login) for unauthenticated API requests.
|
||||
// httpBasic and formLogin are removed — authentication is via Spring Session only.
|
||||
.exceptionHandling(ex -> ex.authenticationEntryPoint(
|
||||
(req, res, e) -> res.setStatus(HttpServletResponse.SC_UNAUTHORIZED)));
|
||||
|
||||
return http.build();
|
||||
}
|
||||
|
||||
@@ -52,7 +52,11 @@ public class InviteService {
|
||||
public InviteToken createInvite(CreateInviteRequest dto, AppUser creator) {
|
||||
Set<UUID> groupIds = new HashSet<>();
|
||||
if (dto.getGroupIds() != null && !dto.getGroupIds().isEmpty()) {
|
||||
List<UserGroup> groups = userService.findGroupsByIds(dto.getGroupIds());
|
||||
Set<UUID> uniqueIds = new HashSet<>(dto.getGroupIds());
|
||||
List<UserGroup> groups = userService.findGroupsByIds(new ArrayList<>(uniqueIds));
|
||||
if (groups.size() != uniqueIds.size()) {
|
||||
throw DomainException.notFound(ErrorCode.GROUP_NOT_FOUND, "One or more group IDs do not exist");
|
||||
}
|
||||
groups.forEach(g -> groupIds.add(g.getId()));
|
||||
}
|
||||
|
||||
|
||||
@@ -24,4 +24,7 @@ public interface InviteTokenRepository extends JpaRepository<InviteToken, UUID>
|
||||
|
||||
@Query("SELECT t FROM InviteToken t ORDER BY t.createdAt DESC")
|
||||
List<InviteToken> findAllOrderedByCreatedAt();
|
||||
|
||||
@Query("SELECT CASE WHEN COUNT(t) > 0 THEN true ELSE false END FROM InviteToken t JOIN t.groupIds g WHERE g = :groupId AND t.revoked = false AND (t.expiresAt IS NULL OR t.expiresAt > CURRENT_TIMESTAMP) AND (t.maxUses IS NULL OR t.useCount < t.maxUses)")
|
||||
boolean existsActiveWithGroupId(@Param("groupId") UUID groupId);
|
||||
}
|
||||
|
||||
@@ -37,6 +37,9 @@ public class UserService {
|
||||
|
||||
private final AppUserRepository userRepository;
|
||||
private final UserGroupRepository groupRepository;
|
||||
// Injected directly (not via InviteService) to avoid a constructor injection cycle:
|
||||
// InviteService → UserService → InviteService. Spring Framework 7 forbids such cycles.
|
||||
private final InviteTokenRepository inviteTokenRepository;
|
||||
private final PasswordEncoder passwordEncoder;
|
||||
private final AuditService auditService;
|
||||
|
||||
@@ -288,6 +291,10 @@ public class UserService {
|
||||
|
||||
@Transactional
|
||||
public void deleteGroup(UUID id) {
|
||||
if (inviteTokenRepository.existsActiveWithGroupId(id)) {
|
||||
throw DomainException.conflict(ErrorCode.GROUP_HAS_ACTIVE_INVITES,
|
||||
"Cannot delete group " + id + " — referenced by one or more active invites");
|
||||
}
|
||||
groupRepository.deleteById(id);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
spring:
|
||||
jpa:
|
||||
show-sql: true
|
||||
# spring.session.cookie.secure is no longer a supported Boot 4.x property.
|
||||
# DefaultCookieSerializer auto-detects Secure from request.isSecure().
|
||||
# Direct HTTP in dev → isSecure()=false → cookie sent without Secure attribute.
|
||||
|
||||
springdoc:
|
||||
api-docs:
|
||||
|
||||
@@ -38,6 +38,13 @@ spring:
|
||||
starttls:
|
||||
enable: true
|
||||
|
||||
session:
|
||||
timeout: 28800s # 8 h idle timeout (MaxInactiveIntervalInSeconds)
|
||||
jdbc:
|
||||
initialize-schema: never # Flyway owns schema creation (V67)
|
||||
# Cookie name, SameSite, and Secure are configured via SpringSessionConfig#cookieSerializer
|
||||
# (spring.session.cookie.* is not supported in Spring Boot 4.x).
|
||||
|
||||
server:
|
||||
# Behind Caddy/reverse proxy: trust X-Forwarded-{Proto,For,Host} so that
|
||||
# request.getScheme(), redirect URLs, and Spring Session "Secure" cookies
|
||||
@@ -45,9 +52,50 @@ server:
|
||||
forward-headers-strategy: native
|
||||
|
||||
management:
|
||||
server:
|
||||
# Management port is separate from the app port so that:
|
||||
# (a) Caddy never proxies /actuator/* (it only routes :8080 → the app port)
|
||||
# (b) Prometheus scrapes backend:8081 directly inside archiv-net, not via Caddy
|
||||
# Note: in Spring Boot 4.0 the management port shares the security filter chain; /actuator/health
|
||||
# and /actuator/prometheus must be explicitly permitted in SecurityConfig — see SecurityConfig.java.
|
||||
port: 8081
|
||||
endpoints:
|
||||
web:
|
||||
exposure:
|
||||
include: health,info,prometheus,metrics
|
||||
endpoint:
|
||||
prometheus:
|
||||
enabled: true
|
||||
# Spring Boot 4.0: metrics export is disabled by default — explicitly opt in for Prometheus
|
||||
prometheus:
|
||||
metrics:
|
||||
export:
|
||||
enabled: true
|
||||
metrics:
|
||||
tags:
|
||||
# Common tag applied to every metric so Grafana's Spring Boot dashboard can filter by application name.
|
||||
# Override via MANAGEMENT_METRICS_TAGS_APPLICATION env var.
|
||||
application: ${spring.application.name}
|
||||
health:
|
||||
mail:
|
||||
enabled: false
|
||||
tracing:
|
||||
sampling:
|
||||
probability: 1.0 # 100% in dev; override via MANAGEMENT_TRACING_SAMPLING_PROBABILITY in prod compose
|
||||
|
||||
# OpenTelemetry trace export — failures are non-fatal (app starts cleanly without Tempo running)
|
||||
# Port 4318 = OTLP HTTP (the default transport for Spring Boot's HttpExporter).
|
||||
# Port 4317 is gRPC-only; sending HTTP/1.1 to it produces "Connection reset".
|
||||
otel:
|
||||
service:
|
||||
name: familienarchiv-backend
|
||||
exporter:
|
||||
otlp:
|
||||
endpoint: ${OTEL_EXPORTER_OTLP_ENDPOINT:http://localhost:4318}
|
||||
logs:
|
||||
exporter: none # Promtail captures Docker logs; disable OTLP log export (Tempo only accepts traces)
|
||||
metrics:
|
||||
exporter: none # Prometheus scrapes /actuator/prometheus; disable OTLP metric export to Tempo
|
||||
|
||||
springdoc:
|
||||
api-docs:
|
||||
@@ -93,3 +141,12 @@ ocr:
|
||||
sender-model:
|
||||
activation-threshold: 100
|
||||
retrain-delta: 50
|
||||
|
||||
sentry:
|
||||
dsn: ${SENTRY_DSN:}
|
||||
environment: ${SPRING_PROFILES_ACTIVE:dev}
|
||||
traces-sample-rate: ${SENTRY_TRACES_SAMPLE_RATE:1.0}
|
||||
send-default-pii: false
|
||||
enable-tracing: true
|
||||
ignored-exceptions-for-type:
|
||||
- org.raddatz.familienarchiv.exception.DomainException
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
-- The composite PK (invite_token_id, group_id) does not support efficient lookups by group_id alone.
|
||||
-- Add a dedicated index to support existsActiveWithGroupId queries.
|
||||
CREATE INDEX idx_itg_group_id ON invite_token_group_ids (group_id);
|
||||
@@ -0,0 +1,27 @@
|
||||
-- Re-introduces the Spring Session JDBC tables that were dropped by V2 as unused.
|
||||
-- DDL copied verbatim from Spring Session 3.x schema-postgresql.sql.
|
||||
-- See ADR-020 and issue #523.
|
||||
|
||||
CREATE TABLE spring_session (
|
||||
PRIMARY_ID CHAR(36) NOT NULL,
|
||||
SESSION_ID CHAR(36) NOT NULL,
|
||||
CREATION_TIME BIGINT NOT NULL,
|
||||
LAST_ACCESS_TIME BIGINT NOT NULL,
|
||||
MAX_INACTIVE_INTERVAL INT NOT NULL,
|
||||
EXPIRY_TIME BIGINT NOT NULL,
|
||||
PRINCIPAL_NAME VARCHAR(100),
|
||||
CONSTRAINT spring_session_pk PRIMARY KEY (PRIMARY_ID)
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX spring_session_ix1 ON spring_session (SESSION_ID);
|
||||
CREATE INDEX spring_session_ix2 ON spring_session (EXPIRY_TIME);
|
||||
CREATE INDEX spring_session_ix3 ON spring_session (PRINCIPAL_NAME);
|
||||
|
||||
CREATE TABLE spring_session_attributes (
|
||||
SESSION_PRIMARY_ID CHAR(36) NOT NULL,
|
||||
ATTRIBUTE_NAME VARCHAR(200) NOT NULL,
|
||||
ATTRIBUTE_BYTES BYTEA NOT NULL,
|
||||
CONSTRAINT spring_session_attributes_pk PRIMARY KEY (SESSION_PRIMARY_ID, ATTRIBUTE_NAME),
|
||||
CONSTRAINT spring_session_attributes_fk FOREIGN KEY (SESSION_PRIMARY_ID)
|
||||
REFERENCES spring_session (PRIMARY_ID) ON DELETE CASCADE
|
||||
);
|
||||
@@ -0,0 +1,63 @@
|
||||
package org.raddatz.familienarchiv;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.web.server.LocalManagementPort;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.web.client.DefaultResponseErrorHandler;
|
||||
import org.springframework.web.client.RestTemplate;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
class ActuatorPrometheusIT {
|
||||
|
||||
@LocalManagementPort
|
||||
private int managementPort;
|
||||
|
||||
@MockitoBean
|
||||
S3Client s3Client;
|
||||
|
||||
@Test
|
||||
void prometheus_endpoint_returns_200_without_credentials() {
|
||||
ResponseEntity<String> response = noThrowTemplate().getForEntity(
|
||||
"http://localhost:" + managementPort + "/actuator/prometheus", String.class);
|
||||
|
||||
assertThat(response.getStatusCode().value()).isEqualTo(200);
|
||||
}
|
||||
|
||||
@Test
|
||||
void prometheus_endpoint_returns_jvm_metrics() {
|
||||
ResponseEntity<String> response = noThrowTemplate().getForEntity(
|
||||
"http://localhost:" + managementPort + "/actuator/prometheus", String.class);
|
||||
|
||||
assertThat(response.getBody()).contains("jvm_memory_used_bytes");
|
||||
}
|
||||
|
||||
@Test
|
||||
void actuator_metrics_requires_authentication() {
|
||||
ResponseEntity<String> response = noThrowTemplate().getForEntity(
|
||||
"http://localhost:" + managementPort + "/actuator/metrics", String.class);
|
||||
|
||||
assertThat(response.getStatusCode().value()).isEqualTo(401);
|
||||
}
|
||||
|
||||
private RestTemplate noThrowTemplate() {
|
||||
RestTemplate template = new RestTemplate();
|
||||
template.setErrorHandler(new DefaultResponseErrorHandler() {
|
||||
@Override
|
||||
public boolean hasError(org.springframework.http.client.ClientHttpResponse response) throws IOException {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
return template;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
package org.raddatz.familienarchiv;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.web.server.LocalManagementPort;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.web.client.DefaultResponseErrorHandler;
|
||||
import org.springframework.web.client.RestTemplate;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
class ActuatorSecurityTest {
|
||||
|
||||
@LocalManagementPort
|
||||
private int managementPort;
|
||||
|
||||
@MockitoBean
|
||||
S3Client s3Client;
|
||||
|
||||
@Test
|
||||
void actuator_health_is_accessible_without_authentication() {
|
||||
ResponseEntity<String> response = noThrowTemplate().getForEntity(
|
||||
"http://localhost:" + managementPort + "/actuator/health", String.class);
|
||||
|
||||
assertThat(response.getStatusCode().value()).isEqualTo(200);
|
||||
}
|
||||
|
||||
@Test
|
||||
void actuator_env_requires_authentication() {
|
||||
ResponseEntity<String> response = noThrowTemplate().getForEntity(
|
||||
"http://localhost:" + managementPort + "/actuator/env", String.class);
|
||||
|
||||
assertThat(response.getStatusCode().value()).isEqualTo(401);
|
||||
}
|
||||
|
||||
private RestTemplate noThrowTemplate() {
|
||||
RestTemplate template = new RestTemplate();
|
||||
template.setErrorHandler(new DefaultResponseErrorHandler() {
|
||||
@Override
|
||||
public boolean hasError(org.springframework.http.client.ClientHttpResponse response) throws IOException {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
return template;
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,18 @@
|
||||
package org.raddatz.familienarchiv;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.testcontainers.service.connection.ServiceConnection;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.testcontainers.containers.PostgreSQLContainer;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
@@ -17,9 +21,18 @@ class ApplicationContextTest {
|
||||
@MockitoBean
|
||||
S3Client s3Client;
|
||||
|
||||
@Autowired
|
||||
ApplicationContext ctx;
|
||||
|
||||
@Test
|
||||
void contextLoads() {
|
||||
// verifies that the Spring context starts successfully with all beans wired,
|
||||
// Flyway migrations applied, and no configuration errors
|
||||
}
|
||||
|
||||
@Test
|
||||
void sentry_is_disabled_when_no_dsn_is_configured() {
|
||||
// application-test.yaml has no sentry.dsn — SDK must stay inactive so tests are clean
|
||||
assertThat(io.sentry.Sentry.isEnabled()).isFalse();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
package org.raddatz.familienarchiv.audit;
|
||||
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.raddatz.familienarchiv.PostgresContainerConfig;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.test.annotation.DirtiesContext;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.transaction.support.TransactionTemplate;
|
||||
@@ -18,7 +18,6 @@ import static org.awaitility.Awaitility.await;
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
|
||||
class AuditServiceIntegrationTest {
|
||||
|
||||
@MockitoBean S3Client s3Client;
|
||||
@@ -26,6 +25,11 @@ class AuditServiceIntegrationTest {
|
||||
@Autowired AuditLogRepository auditLogRepository;
|
||||
@Autowired TransactionTemplate transactionTemplate;
|
||||
|
||||
@BeforeEach
|
||||
void resetAuditLog() {
|
||||
auditLogRepository.deleteAll();
|
||||
}
|
||||
|
||||
@Test
|
||||
void logAfterCommit_writes_ANNOTATION_CREATED_row_after_transaction_commits() {
|
||||
transactionTemplate.execute(status -> {
|
||||
|
||||
@@ -0,0 +1,132 @@
|
||||
package org.raddatz.familienarchiv.auth;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.raddatz.familienarchiv.audit.AuditKind;
|
||||
import org.raddatz.familienarchiv.audit.AuditService;
|
||||
import org.raddatz.familienarchiv.exception.DomainException;
|
||||
import org.raddatz.familienarchiv.exception.ErrorCode;
|
||||
import org.raddatz.familienarchiv.user.AppUser;
|
||||
import org.raddatz.familienarchiv.user.UserService;
|
||||
import org.springframework.security.authentication.AuthenticationManager;
|
||||
import org.springframework.security.authentication.BadCredentialsException;
|
||||
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
|
||||
import org.springframework.security.core.Authentication;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
import static org.mockito.ArgumentMatchers.*;
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
class AuthServiceTest {
|
||||
|
||||
@Mock AuthenticationManager authenticationManager;
|
||||
@Mock UserService userService;
|
||||
@Mock AuditService auditService;
|
||||
@InjectMocks AuthService authService;
|
||||
|
||||
private static final String IP = "127.0.0.1";
|
||||
private static final String UA = "Mozilla/5.0 (Test)";
|
||||
|
||||
@Test
|
||||
void login_returns_user_on_valid_credentials() {
|
||||
UUID userId = UUID.randomUUID();
|
||||
AppUser user = AppUser.builder().id(userId).email("user@test.de").build();
|
||||
Authentication auth = new UsernamePasswordAuthenticationToken("user@test.de", null, Set.of());
|
||||
when(authenticationManager.authenticate(any())).thenReturn(auth);
|
||||
when(userService.findByEmail("user@test.de")).thenReturn(user);
|
||||
|
||||
AuthService.LoginResult result = authService.login("user@test.de", "pass123", IP, UA);
|
||||
|
||||
assertThat(result.user()).isEqualTo(user);
|
||||
assertThat(result.authentication()).isEqualTo(auth);
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_fires_LOGIN_SUCCESS_audit_on_valid_credentials() {
|
||||
UUID userId = UUID.randomUUID();
|
||||
AppUser user = AppUser.builder().id(userId).email("user@test.de").build();
|
||||
Authentication auth = new UsernamePasswordAuthenticationToken("user@test.de", null, Set.of());
|
||||
when(authenticationManager.authenticate(any())).thenReturn(auth);
|
||||
when(userService.findByEmail("user@test.de")).thenReturn(user);
|
||||
|
||||
authService.login("user@test.de", "pass123", IP, UA);
|
||||
|
||||
verify(auditService).log(
|
||||
eq(AuditKind.LOGIN_SUCCESS),
|
||||
eq(userId),
|
||||
isNull(),
|
||||
argThat(payload -> userId.toString().equals(payload.get("userId").toString())
|
||||
&& IP.equals(payload.get("ip"))
|
||||
&& !payload.containsKey("password"))
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_throws_INVALID_CREDENTIALS_on_bad_password() {
|
||||
when(authenticationManager.authenticate(any())).thenThrow(new BadCredentialsException("bad"));
|
||||
|
||||
assertThatThrownBy(() -> authService.login("user@test.de", "wrong", IP, UA))
|
||||
.isInstanceOf(DomainException.class)
|
||||
.satisfies(ex -> assertThat(((DomainException) ex).getCode())
|
||||
.isEqualTo(ErrorCode.INVALID_CREDENTIALS));
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_fires_LOGIN_FAILED_audit_on_bad_credentials_without_password_in_payload() {
|
||||
when(authenticationManager.authenticate(any())).thenThrow(new BadCredentialsException("bad"));
|
||||
|
||||
assertThatThrownBy(() -> authService.login("user@test.de", "wrong", IP, UA))
|
||||
.isInstanceOf(DomainException.class);
|
||||
|
||||
verify(auditService).log(
|
||||
eq(AuditKind.LOGIN_FAILED),
|
||||
isNull(),
|
||||
isNull(),
|
||||
argThat(payload -> "user@test.de".equals(payload.get("email"))
|
||||
&& IP.equals(payload.get("ip"))
|
||||
&& !payload.containsKey("password")
|
||||
&& !payload.containsKey("pwd")
|
||||
&& !payload.containsKey("passwordAttempt"))
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_treats_unknown_user_identically_to_bad_password() {
|
||||
when(authenticationManager.authenticate(any()))
|
||||
.thenThrow(new BadCredentialsException("unknown user hidden as bad creds"));
|
||||
|
||||
assertThatThrownBy(() -> authService.login("unknown@test.de", "any", IP, UA))
|
||||
.isInstanceOf(DomainException.class)
|
||||
.satisfies(ex -> assertThat(((DomainException) ex).getCode())
|
||||
.isEqualTo(ErrorCode.INVALID_CREDENTIALS));
|
||||
|
||||
verify(auditService).log(eq(AuditKind.LOGIN_FAILED), isNull(), isNull(), anyMap());
|
||||
}
|
||||
|
||||
@Test
|
||||
void logout_fires_LOGOUT_audit() {
|
||||
UUID userId = UUID.randomUUID();
|
||||
AppUser user = AppUser.builder().id(userId).email("user@test.de").build();
|
||||
when(userService.findByEmail("user@test.de")).thenReturn(user);
|
||||
|
||||
authService.logout("user@test.de", IP, UA);
|
||||
|
||||
verify(auditService).log(
|
||||
eq(AuditKind.LOGOUT),
|
||||
eq(userId),
|
||||
isNull(),
|
||||
argThat(payload -> userId.toString().equals(payload.get("userId").toString())
|
||||
&& IP.equals(payload.get("ip"))
|
||||
&& !payload.containsKey("password"))
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,111 @@
|
||||
package org.raddatz.familienarchiv.auth;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.raddatz.familienarchiv.auth.AuthService.LoginResult;
|
||||
import org.raddatz.familienarchiv.exception.DomainException;
|
||||
import org.raddatz.familienarchiv.exception.ErrorCode;
|
||||
import org.raddatz.familienarchiv.security.SecurityConfig;
|
||||
import org.raddatz.familienarchiv.security.PermissionAspect;
|
||||
import org.raddatz.familienarchiv.user.AppUser;
|
||||
import org.raddatz.familienarchiv.user.CustomUserDetailsService;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.aop.AopAutoConfiguration;
|
||||
import org.springframework.boot.webmvc.test.autoconfigure.WebMvcTest;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.http.MediaType;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.test.web.servlet.MockMvc;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.Mockito.*;
|
||||
import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.user;
|
||||
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
|
||||
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
|
||||
|
||||
@WebMvcTest(AuthSessionController.class)
|
||||
@Import({SecurityConfig.class, PermissionAspect.class, AopAutoConfiguration.class})
|
||||
class AuthSessionControllerTest {
|
||||
|
||||
@Autowired MockMvc mockMvc;
|
||||
|
||||
@MockitoBean AuthService authService;
|
||||
@MockitoBean CustomUserDetailsService customUserDetailsService;
|
||||
|
||||
// ─── POST /api/auth/login ──────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void login_returns_200_with_user_on_valid_credentials() throws Exception {
|
||||
UUID userId = UUID.randomUUID();
|
||||
AppUser appUser = AppUser.builder().id(userId).email("user@test.de").build();
|
||||
Authentication auth = mock(Authentication.class);
|
||||
when(authService.login(anyString(), anyString(), anyString(), anyString()))
|
||||
.thenReturn(new LoginResult(appUser, auth));
|
||||
|
||||
mockMvc.perform(post("/api/auth/login")
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.content("{\"email\":\"user@test.de\",\"password\":\"pass123\"}"))
|
||||
.andExpect(status().isOk())
|
||||
.andExpect(jsonPath("$.email").value("user@test.de"))
|
||||
.andExpect(jsonPath("$.id").value(userId.toString()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_returns_401_with_INVALID_CREDENTIALS_on_bad_credentials() throws Exception {
|
||||
when(authService.login(anyString(), anyString(), anyString(), anyString()))
|
||||
.thenThrow(DomainException.invalidCredentials());
|
||||
|
||||
mockMvc.perform(post("/api/auth/login")
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.content("{\"email\":\"user@test.de\",\"password\":\"wrong\"}"))
|
||||
.andExpect(status().isUnauthorized())
|
||||
.andExpect(jsonPath("$.code").value(ErrorCode.INVALID_CREDENTIALS.name()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_is_public_no_session_required() throws Exception {
|
||||
UUID userId = UUID.randomUUID();
|
||||
AppUser appUser = AppUser.builder().id(userId).email("pub@test.de").build();
|
||||
Authentication auth = mock(Authentication.class);
|
||||
when(authService.login(anyString(), anyString(), anyString(), anyString()))
|
||||
.thenReturn(new LoginResult(appUser, auth));
|
||||
|
||||
// No WithMockUser — must be reachable without an active session
|
||||
mockMvc.perform(post("/api/auth/login")
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.content("{\"email\":\"pub@test.de\",\"password\":\"pass\"}"))
|
||||
.andExpect(status().isOk());
|
||||
}
|
||||
|
||||
@Test
|
||||
void login_does_not_set_cookie_on_failure() throws Exception {
|
||||
when(authService.login(anyString(), anyString(), anyString(), anyString()))
|
||||
.thenThrow(DomainException.invalidCredentials());
|
||||
|
||||
mockMvc.perform(post("/api/auth/login")
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.content("{\"email\":\"user@test.de\",\"password\":\"wrong\"}"))
|
||||
.andExpect(status().isUnauthorized())
|
||||
.andExpect(header().doesNotExist("Set-Cookie"));
|
||||
}
|
||||
|
||||
// ─── POST /api/auth/logout ─────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void logout_returns_204_when_authenticated() throws Exception {
|
||||
doNothing().when(authService).logout(anyString(), anyString(), anyString());
|
||||
|
||||
mockMvc.perform(post("/api/auth/logout")
|
||||
.with(user("user@test.de")))
|
||||
.andExpect(status().isNoContent());
|
||||
}
|
||||
|
||||
@Test
|
||||
void logout_returns_401_when_not_authenticated() throws Exception {
|
||||
// No authentication at all — Spring Security must return 401
|
||||
mockMvc.perform(post("/api/auth/logout"))
|
||||
.andExpect(status().isUnauthorized());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,154 @@
|
||||
package org.raddatz.familienarchiv.auth;
|
||||
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.raddatz.familienarchiv.PostgresContainerConfig;
|
||||
import org.raddatz.familienarchiv.user.AppUser;
|
||||
import org.raddatz.familienarchiv.user.AppUserRepository;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.web.server.LocalServerPort;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.http.HttpEntity;
|
||||
import org.springframework.http.HttpHeaders;
|
||||
import org.springframework.http.HttpMethod;
|
||||
import org.springframework.http.MediaType;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.http.client.ClientHttpResponse;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.security.crypto.password.PasswordEncoder;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.web.client.DefaultResponseErrorHandler;
|
||||
import org.springframework.web.client.RestTemplate;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
class AuthSessionIntegrationTest {
|
||||
|
||||
@LocalServerPort int port;
|
||||
@MockitoBean S3Client s3Client;
|
||||
@Autowired AppUserRepository userRepository;
|
||||
@Autowired PasswordEncoder passwordEncoder;
|
||||
@Autowired JdbcTemplate jdbcTemplate;
|
||||
|
||||
private RestTemplate http;
|
||||
private String baseUrl;
|
||||
|
||||
private static final String TEST_EMAIL = "session-it@test.de";
|
||||
private static final String TEST_PASSWORD = "pass4Session!";
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
http = noThrowRestTemplate();
|
||||
baseUrl = "http://localhost:" + port;
|
||||
// spring_session_attributes cascades on delete — removing the parent row is enough
|
||||
jdbcTemplate.update("DELETE FROM spring_session");
|
||||
jdbcTemplate.update("DELETE FROM app_users WHERE email = ?", TEST_EMAIL);
|
||||
userRepository.save(AppUser.builder()
|
||||
.email(TEST_EMAIL)
|
||||
.password(passwordEncoder.encode(TEST_PASSWORD))
|
||||
.build());
|
||||
}
|
||||
|
||||
// ─── Task 13: full session lifecycle ──────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void login_sets_opaque_fa_session_cookie() {
|
||||
ResponseEntity<String> response = doLogin();
|
||||
|
||||
assertThat(response.getStatusCode().value()).isEqualTo(200);
|
||||
String cookie = extractFaSessionCookie(response);
|
||||
assertThat(cookie).isNotBlank();
|
||||
// Opaque token — must not look like Basic-auth credentials (email:password)
|
||||
assertThat(cookie).doesNotContain(":");
|
||||
}
|
||||
|
||||
@Test
|
||||
void session_cookie_authenticates_subsequent_request() {
|
||||
String cookie = extractFaSessionCookie(doLogin());
|
||||
|
||||
ResponseEntity<String> me = http.exchange(
|
||||
baseUrl + "/api/users/me", HttpMethod.GET,
|
||||
new HttpEntity<>(cookieHeaders(cookie)), String.class);
|
||||
|
||||
assertThat(me.getStatusCode().value()).isEqualTo(200);
|
||||
}
|
||||
|
||||
@Test
|
||||
void logout_invalidates_session_and_cookie_returns_401_on_reuse() {
|
||||
String cookie = extractFaSessionCookie(doLogin());
|
||||
|
||||
ResponseEntity<Void> logout = http.postForEntity(
|
||||
baseUrl + "/api/auth/logout",
|
||||
new HttpEntity<>(cookieHeaders(cookie)), Void.class);
|
||||
assertThat(logout.getStatusCode().value()).isEqualTo(204);
|
||||
|
||||
ResponseEntity<String> me = http.exchange(
|
||||
baseUrl + "/api/users/me", HttpMethod.GET,
|
||||
new HttpEntity<>(cookieHeaders(cookie)), String.class);
|
||||
assertThat(me.getStatusCode().value()).isEqualTo(401);
|
||||
}
|
||||
|
||||
// ─── Task 14: idle-timeout ────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void session_expired_by_idle_timeout_returns_401() {
|
||||
String cookie = extractFaSessionCookie(doLogin());
|
||||
|
||||
// Backdate LAST_ACCESS_TIME by 9 hours so lastAccess + maxInactiveInterval(8h) < now
|
||||
long nineHoursAgoMs = System.currentTimeMillis() - 9L * 3600 * 1000;
|
||||
jdbcTemplate.update(
|
||||
"UPDATE spring_session SET LAST_ACCESS_TIME = ?, EXPIRY_TIME = ?",
|
||||
nineHoursAgoMs, nineHoursAgoMs);
|
||||
|
||||
ResponseEntity<String> me = http.exchange(
|
||||
baseUrl + "/api/users/me", HttpMethod.GET,
|
||||
new HttpEntity<>(cookieHeaders(cookie)), String.class);
|
||||
assertThat(me.getStatusCode().value()).isEqualTo(401);
|
||||
}
|
||||
|
||||
// ─── helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
private ResponseEntity<String> doLogin() {
|
||||
HttpHeaders headers = new HttpHeaders();
|
||||
headers.setContentType(MediaType.APPLICATION_JSON);
|
||||
String body = "{\"email\":\"" + TEST_EMAIL + "\",\"password\":\"" + TEST_PASSWORD + "\"}";
|
||||
return http.postForEntity(baseUrl + "/api/auth/login",
|
||||
new HttpEntity<>(body, headers), String.class);
|
||||
}
|
||||
|
||||
private HttpHeaders cookieHeaders(String sessionId) {
|
||||
HttpHeaders headers = new HttpHeaders();
|
||||
headers.set("Cookie", "fa_session=" + sessionId);
|
||||
return headers;
|
||||
}
|
||||
|
||||
private String extractFaSessionCookie(ResponseEntity<?> response) {
|
||||
List<String> setCookieHeader = response.getHeaders().get("Set-Cookie");
|
||||
if (setCookieHeader == null) return "";
|
||||
return setCookieHeader.stream()
|
||||
.filter(c -> c.startsWith("fa_session="))
|
||||
.map(c -> c.split(";")[0].substring("fa_session=".length()))
|
||||
.findFirst()
|
||||
.orElse("");
|
||||
}
|
||||
|
||||
private RestTemplate noThrowRestTemplate() {
|
||||
RestTemplate template = new RestTemplate();
|
||||
template.setErrorHandler(new DefaultResponseErrorHandler() {
|
||||
@Override
|
||||
public boolean hasError(ClientHttpResponse response) throws IOException {
|
||||
return false;
|
||||
}
|
||||
});
|
||||
return template;
|
||||
}
|
||||
}
|
||||
@@ -12,9 +12,9 @@ import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.data.domain.PageRequest;
|
||||
import org.springframework.test.annotation.DirtiesContext;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import java.time.LocalDate;
|
||||
@@ -33,7 +33,7 @@ import static org.assertj.core.api.Assertions.assertThat;
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
|
||||
@Transactional
|
||||
class DocumentSearchPagedIntegrationTest {
|
||||
|
||||
private static final int FIXTURE_SIZE = 120;
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
package org.raddatz.familienarchiv.exception;
|
||||
|
||||
import io.sentry.Sentry;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.Mockito.mockStatic;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
class GlobalExceptionHandlerTest {
|
||||
|
||||
@InjectMocks
|
||||
private GlobalExceptionHandler handler;
|
||||
|
||||
@Test
|
||||
void handleGeneric_captures_exception_in_sentry_and_returns_500() {
|
||||
RuntimeException ex = new RuntimeException("unexpected failure");
|
||||
|
||||
try (MockedStatic<Sentry> sentryMock = mockStatic(Sentry.class)) {
|
||||
ResponseEntity<GlobalExceptionHandler.ErrorResponse> response = handler.handleGeneric(ex);
|
||||
|
||||
sentryMock.verify(() -> Sentry.captureException(ex));
|
||||
assertThat(response.getStatusCode().value()).isEqualTo(500);
|
||||
assertThat(response.getBody()).isNotNull();
|
||||
assertThat(response.getBody().code()).isEqualTo(ErrorCode.INTERNAL_ERROR);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -19,9 +19,9 @@ import org.springframework.context.annotation.Import;
|
||||
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
|
||||
import org.springframework.security.core.authority.SimpleGrantedAuthority;
|
||||
import org.springframework.security.core.context.SecurityContextHolder;
|
||||
import org.springframework.test.annotation.DirtiesContext;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import java.util.List;
|
||||
@@ -32,7 +32,7 @@ import static org.assertj.core.api.Assertions.assertThat;
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
|
||||
@Transactional
|
||||
class GeschichteServiceIntegrationTest {
|
||||
|
||||
@MockitoBean
|
||||
|
||||
@@ -20,7 +20,13 @@ import software.amazon.awssdk.core.sync.RequestBody;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
|
||||
|
||||
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
|
||||
import org.xml.sax.SAXParseException;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.OutputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.time.LocalDate;
|
||||
@@ -29,6 +35,8 @@ import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipOutputStream;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
@@ -70,14 +78,20 @@ class MassImportServiceTest {
|
||||
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.IDLE);
|
||||
}
|
||||
|
||||
@Test
|
||||
void getStatus_hasStatusCode_IMPORT_IDLE_byDefault() {
|
||||
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_IDLE");
|
||||
}
|
||||
|
||||
// ─── runImportAsync ───────────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void runImportAsync_setsFailedStatus_whenImportDirectoryDoesNotExist() {
|
||||
// /import directory doesn't exist in test environment → findSpreadsheetFile throws
|
||||
// /import directory doesn't exist in test environment → IOException → IMPORT_FAILED_INTERNAL
|
||||
service.runImportAsync();
|
||||
|
||||
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.FAILED);
|
||||
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_FAILED_INTERNAL");
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -93,10 +107,35 @@ class MassImportServiceTest {
|
||||
assertThat(service.getStatus().message()).contains(tempDir.toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
void runImportAsync_setsStatusCode_IMPORT_FAILED_NO_SPREADSHEET_whenDirIsEmpty(@TempDir Path tempDir) {
|
||||
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
|
||||
|
||||
service.runImportAsync();
|
||||
|
||||
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_FAILED_NO_SPREADSHEET");
|
||||
}
|
||||
|
||||
@Test
|
||||
void runImportAsync_setsStatusCode_IMPORT_DONE_whenSpreadsheetHasNoDataRows(@TempDir Path tempDir) throws Exception {
|
||||
Path xlsx = tempDir.resolve("import.xlsx");
|
||||
try (XSSFWorkbook wb = new XSSFWorkbook()) {
|
||||
wb.createSheet("Sheet1");
|
||||
try (OutputStream out = Files.newOutputStream(xlsx)) {
|
||||
wb.write(out);
|
||||
}
|
||||
}
|
||||
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
|
||||
|
||||
service.runImportAsync();
|
||||
|
||||
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_DONE");
|
||||
}
|
||||
|
||||
@Test
|
||||
void runImportAsync_throwsConflict_whenAlreadyRunning() {
|
||||
MassImportService.ImportStatus running = new MassImportService.ImportStatus(
|
||||
MassImportService.State.RUNNING, "Running...", 0, LocalDateTime.now());
|
||||
MassImportService.State.RUNNING, "IMPORT_RUNNING", "Running...", 0, LocalDateTime.now());
|
||||
ReflectionTestUtils.setField(service, "currentStatus", running);
|
||||
|
||||
assertThatThrownBy(() -> service.runImportAsync())
|
||||
@@ -486,6 +525,25 @@ class MassImportServiceTest {
|
||||
assertThat(result).isEqualTo("hello");
|
||||
}
|
||||
|
||||
// ─── readOds — XXE security regression ───────────────────────────────────
|
||||
|
||||
// Security regression — do not remove.
|
||||
@Test
|
||||
void readOds_rejects_xxe_doctype_payload(@TempDir Path tempDir) throws Exception {
|
||||
File malicious = buildXxeOds(tempDir, "file:///etc/hostname");
|
||||
assertThatThrownBy(() -> service.readOds(malicious))
|
||||
.isInstanceOf(SAXParseException.class)
|
||||
.hasMessageContaining("DOCTYPE is disallowed");
|
||||
}
|
||||
|
||||
@Test
|
||||
void readOds_parses_valid_ods_correctly(@TempDir Path tempDir) throws Exception {
|
||||
File valid = buildValidOds(tempDir, "Mustermann");
|
||||
List<List<String>> rows = service.readOds(valid);
|
||||
assertThat(rows).isNotEmpty();
|
||||
assertThat(rows.get(0)).contains("Mustermann");
|
||||
}
|
||||
|
||||
// ─── helpers ──────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
@@ -519,4 +577,48 @@ class MassImportServiceTest {
|
||||
"" // 13: transcription
|
||||
);
|
||||
}
|
||||
|
||||
/** Creates a minimal ODS ZIP containing a content.xml with an XXE payload. */
|
||||
private File buildXxeOds(Path dir, String entityTarget) throws Exception {
|
||||
String xml = "<?xml version=\"1.0\"?>"
|
||||
+ "<!DOCTYPE foo [<!ENTITY xxe SYSTEM \"" + entityTarget + "\">]>"
|
||||
+ "<office:document-content"
|
||||
+ " xmlns:office=\"urn:oasis:names:tc:opendocument:xmlns:office:1.0\""
|
||||
+ " xmlns:table=\"urn:oasis:names:tc:opendocument:xmlns:table:1.0\""
|
||||
+ " xmlns:text=\"urn:oasis:names:tc:opendocument:xmlns:text:1.0\">"
|
||||
+ "<office:body><office:spreadsheet>"
|
||||
+ "<table:table><table:table-row><table:table-cell>"
|
||||
+ "<text:p>&xxe;</text:p>"
|
||||
+ "</table:table-cell></table:table-row></table:table>"
|
||||
+ "</office:spreadsheet></office:body>"
|
||||
+ "</office:document-content>";
|
||||
return writeOdsZip(dir.resolve("malicious.ods"), xml);
|
||||
}
|
||||
|
||||
/** Creates a minimal valid ODS ZIP containing a content.xml with the given cell value.
|
||||
* cellValue must not contain XML metacharacters ({@code < > &}). */
|
||||
private File buildValidOds(Path dir, String cellValue) throws Exception {
|
||||
String xml = "<?xml version=\"1.0\"?>"
|
||||
+ "<office:document-content"
|
||||
+ " xmlns:office=\"urn:oasis:names:tc:opendocument:xmlns:office:1.0\""
|
||||
+ " xmlns:table=\"urn:oasis:names:tc:opendocument:xmlns:table:1.0\""
|
||||
+ " xmlns:text=\"urn:oasis:names:tc:opendocument:xmlns:text:1.0\">"
|
||||
+ "<office:body><office:spreadsheet>"
|
||||
+ "<table:table><table:table-row><table:table-cell>"
|
||||
+ "<text:p>" + cellValue + "</text:p>"
|
||||
+ "</table:table-cell></table:table-row></table:table>"
|
||||
+ "</office:spreadsheet></office:body>"
|
||||
+ "</office:document-content>";
|
||||
return writeOdsZip(dir.resolve("valid.ods"), xml);
|
||||
}
|
||||
|
||||
private File writeOdsZip(Path destination, String contentXml) throws Exception {
|
||||
try (OutputStream fos = Files.newOutputStream(destination);
|
||||
ZipOutputStream zip = new ZipOutputStream(fos)) {
|
||||
zip.putNextEntry(new ZipEntry("content.xml"));
|
||||
zip.write(contentXml.getBytes(StandardCharsets.UTF_8));
|
||||
zip.closeEntry();
|
||||
}
|
||||
return destination.toFile();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,9 +8,9 @@ import org.raddatz.familienarchiv.person.PersonRepository;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.test.annotation.DirtiesContext;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
@@ -18,7 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat;
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
|
||||
@Transactional
|
||||
class PersonServiceIntegrationTest {
|
||||
|
||||
@MockitoBean S3Client s3Client;
|
||||
|
||||
@@ -1,134 +0,0 @@
|
||||
package org.raddatz.familienarchiv.security;
|
||||
|
||||
import jakarta.servlet.FilterChain;
|
||||
import jakarta.servlet.http.Cookie;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.springframework.mock.web.MockHttpServletRequest;
|
||||
import org.springframework.mock.web.MockHttpServletResponse;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
/**
|
||||
* The filter must turn a browser-side {@code Cookie: auth_token=Basic%20<base64>}
|
||||
* into {@code Authorization: Basic <base64>} (URL-decoded) so that Spring's
|
||||
* Basic-auth filter accepts it. Skips when the request already has an explicit
|
||||
* {@code Authorization} header, or when no {@code auth_token} cookie is present.
|
||||
*
|
||||
* <p>See #520.
|
||||
*/
|
||||
class AuthTokenCookieFilterTest {
|
||||
|
||||
private final AuthTokenCookieFilter filter = new AuthTokenCookieFilter();
|
||||
|
||||
@Test
|
||||
void promotes_url_encoded_auth_token_cookie_to_decoded_Authorization_header() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.setCookies(new Cookie("auth_token", "Basic%20YWRtaW5AZmFtaWx5YXJjaGl2ZS5sb2NhbDpzZWNyZXQ%3D"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
ArgumentCaptor<HttpServletRequest> captor = ArgumentCaptor.forClass(HttpServletRequest.class);
|
||||
verify(chain, times(1)).doFilter(captor.capture(), org.mockito.ArgumentMatchers.any(HttpServletResponse.class));
|
||||
|
||||
HttpServletRequest forwarded = captor.getValue();
|
||||
assertThat(forwarded.getHeader("Authorization"))
|
||||
.as("Authorization must be URL-decoded so Spring's Basic parser sees a literal space")
|
||||
.isEqualTo("Basic YWRtaW5AZmFtaWx5YXJjaGl2ZS5sb2NhbDpzZWNyZXQ=");
|
||||
}
|
||||
|
||||
@Test
|
||||
void preserves_explicit_Authorization_header_and_ignores_cookie() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.addHeader("Authorization", "Basic explicit-header-wins");
|
||||
req.setCookies(new Cookie("auth_token", "Basic%20cookie-would-have-promoted"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
// Forwards the original request unchanged — same instance, no wrapping.
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_when_no_cookies_at_all() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_when_auth_token_cookie_is_absent() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.setCookies(new Cookie("some_other_cookie", "value"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_when_auth_token_cookie_is_empty() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.setCookies(new Cookie("auth_token", ""));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_unchanged_when_request_is_outside_api_scope() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
// /actuator/health and similar must NOT receive a promoted Authorization
|
||||
// header — they have their own access rules and should never be authed
|
||||
// via the cookie.
|
||||
req.setRequestURI("/actuator/health");
|
||||
req.setCookies(new Cookie("auth_token", "Basic%20YWR=="));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
// Forwards the original request unchanged — same instance, no wrapping.
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_unchanged_when_cookie_value_is_malformed_percent_encoding() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
// Lone "%" without two hex digits → URLDecoder throws → filter must
|
||||
// refuse to forward a bogus Authorization header.
|
||||
req.setCookies(new Cookie("auth_token", "Basic%2"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
// Forwards the original request unchanged — Spring Security treats it
|
||||
// as unauthenticated rather than crashing on bad input.
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
}
|
||||
@@ -40,6 +40,47 @@ class AdminControllerTest {
|
||||
@MockitoBean ThumbnailBackfillService thumbnailBackfillService;
|
||||
@MockitoBean CustomUserDetailsService customUserDetailsService;
|
||||
|
||||
// ─── GET /api/admin/import-status ─────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
@WithMockUser(authorities = "ADMIN")
|
||||
void importStatus_returns200_withStatusCode_whenAdmin() throws Exception {
|
||||
MassImportService.ImportStatus status = new MassImportService.ImportStatus(
|
||||
MassImportService.State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
|
||||
when(massImportService.getStatus()).thenReturn(status);
|
||||
|
||||
mockMvc.perform(get("/api/admin/import-status"))
|
||||
.andExpect(status().isOk())
|
||||
.andExpect(jsonPath("$.state").value("IDLE"))
|
||||
.andExpect(jsonPath("$.statusCode").value("IMPORT_IDLE"))
|
||||
.andExpect(jsonPath("$.processed").value(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser(authorities = "ADMIN")
|
||||
void importStatus_messageField_notPresentInApiResponse() throws Exception {
|
||||
MassImportService.ImportStatus status = new MassImportService.ImportStatus(
|
||||
MassImportService.State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
|
||||
when(massImportService.getStatus()).thenReturn(status);
|
||||
|
||||
mockMvc.perform(get("/api/admin/import-status"))
|
||||
.andExpect(status().isOk())
|
||||
.andExpect(jsonPath("$.message").doesNotExist());
|
||||
}
|
||||
|
||||
@Test
|
||||
void importStatus_returns401_whenUnauthenticated() throws Exception {
|
||||
mockMvc.perform(get("/api/admin/import-status"))
|
||||
.andExpect(status().isUnauthorized());
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser(authorities = "READ_ALL")
|
||||
void importStatus_returns403_whenUserLacksAdminPermission() throws Exception {
|
||||
mockMvc.perform(get("/api/admin/import-status"))
|
||||
.andExpect(status().isForbidden());
|
||||
}
|
||||
|
||||
@Test
|
||||
void backfillVersions_returns401_whenUnauthenticated() throws Exception {
|
||||
mockMvc.perform(post("/api/admin/backfill-versions"))
|
||||
|
||||
@@ -20,10 +20,13 @@ import org.springframework.security.test.context.support.WithMockUser;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.test.web.servlet.MockMvc;
|
||||
|
||||
import org.mockito.ArgumentCaptor;
|
||||
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.ArgumentMatchers.*;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
@@ -147,6 +150,30 @@ class InviteControllerTest {
|
||||
.andExpect(jsonPath("$.label").value("Für Familie"));
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser(username = "admin@test.com", authorities = {"ADMIN_USER"})
|
||||
void createInvite_forwardsGroupIdsToService() throws Exception {
|
||||
UUID groupId = UUID.randomUUID();
|
||||
AppUser admin = AppUser.builder().id(UUID.randomUUID()).email("admin@test.com").build();
|
||||
when(userService.findByEmail("admin@test.com")).thenReturn(admin);
|
||||
|
||||
InviteToken savedToken = InviteToken.builder()
|
||||
.id(UUID.randomUUID()).code("ABCDE12345").useCount(0).build();
|
||||
when(inviteService.createInvite(any(), eq(admin))).thenReturn(savedToken);
|
||||
when(inviteService.toListItemDTO(any(), anyString()))
|
||||
.thenReturn(makeInviteDTO(savedToken.getId(), "ABCDE12345"));
|
||||
|
||||
String body = "{\"groupIds\":[\"" + groupId + "\"]}";
|
||||
mockMvc.perform(post("/api/invites")
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.content(body))
|
||||
.andExpect(status().isCreated());
|
||||
|
||||
ArgumentCaptor<CreateInviteRequest> captor = ArgumentCaptor.forClass(CreateInviteRequest.class);
|
||||
verify(inviteService).createInvite(captor.capture(), eq(admin));
|
||||
assertThat(captor.getValue().getGroupIds()).containsExactly(groupId);
|
||||
}
|
||||
|
||||
// ─── DELETE /api/invites/{id} ─────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
|
||||
@@ -156,6 +156,35 @@ class InviteServiceTest {
|
||||
assertThat(result.getGroupIds()).contains(g.getId());
|
||||
}
|
||||
|
||||
@Test
|
||||
void createInvite_throwsGroupNotFound_whenSubmittedGroupIdDoesNotExist() {
|
||||
UUID unknownGroupId = UUID.randomUUID();
|
||||
when(userService.findGroupsByIds(anyList())).thenReturn(List.of());
|
||||
|
||||
CreateInviteRequest req = new CreateInviteRequest();
|
||||
req.setGroupIds(List.of(unknownGroupId));
|
||||
|
||||
assertThatThrownBy(() -> inviteService.createInvite(req, admin))
|
||||
.isInstanceOf(DomainException.class)
|
||||
.extracting(e -> ((DomainException) e).getCode())
|
||||
.isEqualTo(ErrorCode.GROUP_NOT_FOUND);
|
||||
}
|
||||
|
||||
@Test
|
||||
void createInvite_doesNotThrowGroupNotFound_whenDuplicateGroupIdsSubmitted() {
|
||||
UUID groupId = UUID.randomUUID();
|
||||
UserGroup group = UserGroup.builder().id(groupId).name("Familie").build();
|
||||
when(inviteTokenRepository.findByCode(anyString())).thenReturn(Optional.empty());
|
||||
when(userService.findGroupsByIds(anyList())).thenReturn(List.of(group));
|
||||
when(inviteTokenRepository.save(any())).thenAnswer(inv -> inv.getArgument(0));
|
||||
|
||||
CreateInviteRequest req = new CreateInviteRequest();
|
||||
req.setGroupIds(List.of(groupId, groupId)); // same UUID submitted twice
|
||||
|
||||
// before deduplication: size(groups)==1 != size(submitted)==2 → false GROUP_NOT_FOUND
|
||||
assertThatCode(() -> inviteService.createInvite(req, admin)).doesNotThrowAnyException();
|
||||
}
|
||||
|
||||
// ─── redeemInvite ─────────────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
|
||||
@@ -0,0 +1,78 @@
|
||||
package org.raddatz.familienarchiv.user;
|
||||
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.raddatz.familienarchiv.PostgresContainerConfig;
|
||||
import org.raddatz.familienarchiv.config.FlywayConfig;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.jdbc.test.autoconfigure.AutoConfigureTestDatabase;
|
||||
import org.springframework.boot.data.jpa.test.autoconfigure.DataJpaTest;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@DataJpaTest
|
||||
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
|
||||
@Import({PostgresContainerConfig.class, FlywayConfig.class})
|
||||
class InviteTokenRepositoryIntegrationTest {
|
||||
|
||||
@Autowired InviteTokenRepository inviteTokenRepository;
|
||||
@Autowired UserGroupRepository userGroupRepository;
|
||||
@Autowired AppUserRepository appUserRepository;
|
||||
|
||||
private UserGroup group;
|
||||
private AppUser admin;
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
inviteTokenRepository.deleteAll();
|
||||
userGroupRepository.deleteAll();
|
||||
appUserRepository.deleteAll();
|
||||
admin = appUserRepository.save(AppUser.builder().email("admin@test.com").password("pw").build());
|
||||
group = userGroupRepository.save(UserGroup.builder().name("Familie").build());
|
||||
}
|
||||
|
||||
// ─── existsActiveWithGroupId ──────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void existsActiveWithGroupId_returnsTrueForActiveInviteLinkedToGroup() {
|
||||
inviteTokenRepository.save(token(t -> t));
|
||||
|
||||
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isTrue();
|
||||
}
|
||||
|
||||
@Test
|
||||
void existsActiveWithGroupId_returnsFalseWhenInviteIsRevoked() {
|
||||
inviteTokenRepository.save(token(t -> t.revoked(true)));
|
||||
|
||||
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
|
||||
}
|
||||
|
||||
@Test
|
||||
void existsActiveWithGroupId_returnsFalseWhenInviteIsExpired() {
|
||||
inviteTokenRepository.save(token(t -> t.expiresAt(LocalDateTime.now().minusDays(1))));
|
||||
|
||||
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
|
||||
}
|
||||
|
||||
@Test
|
||||
void existsActiveWithGroupId_returnsFalseWhenInviteIsExhausted() {
|
||||
inviteTokenRepository.save(token(t -> t.maxUses(1).useCount(1)));
|
||||
|
||||
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
|
||||
}
|
||||
|
||||
// ─── helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
private InviteToken token(java.util.function.UnaryOperator<InviteToken.InviteTokenBuilder> customizer) {
|
||||
InviteToken.InviteTokenBuilder builder = InviteToken.builder()
|
||||
.code(UUID.randomUUID().toString().replace("-", "").substring(0, 10))
|
||||
.groupIds(new java.util.HashSet<>(Set.of(group.getId())))
|
||||
.createdBy(admin);
|
||||
return customizer.apply(builder).build();
|
||||
}
|
||||
}
|
||||
@@ -36,6 +36,7 @@ class UserServiceTest {
|
||||
|
||||
@Mock AppUserRepository userRepository;
|
||||
@Mock UserGroupRepository groupRepository;
|
||||
@Mock InviteTokenRepository inviteTokenRepository;
|
||||
@Mock PasswordEncoder passwordEncoder;
|
||||
@Mock AuditService auditService;
|
||||
@InjectMocks UserService userService;
|
||||
@@ -903,6 +904,29 @@ class UserServiceTest {
|
||||
assertThat(result.getPermissions()).containsExactlyInAnyOrder("READ_ALL", "WRITE_ALL");
|
||||
}
|
||||
|
||||
// ─── deleteGroup ──────────────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void deleteGroup_throwsConflict_whenActiveInviteReferencesGroup() {
|
||||
UUID groupId = UUID.randomUUID();
|
||||
when(inviteTokenRepository.existsActiveWithGroupId(groupId)).thenReturn(true);
|
||||
|
||||
assertThatThrownBy(() -> userService.deleteGroup(groupId))
|
||||
.isInstanceOf(DomainException.class)
|
||||
.extracting(e -> ((DomainException) e).getCode())
|
||||
.isEqualTo(ErrorCode.GROUP_HAS_ACTIVE_INVITES);
|
||||
}
|
||||
|
||||
@Test
|
||||
void deleteGroup_deletesGroup_whenNoActiveInviteReferencesGroup() {
|
||||
UUID groupId = UUID.randomUUID();
|
||||
when(inviteTokenRepository.existsActiveWithGroupId(groupId)).thenReturn(false);
|
||||
|
||||
userService.deleteGroup(groupId);
|
||||
|
||||
verify(groupRepository).deleteById(groupId);
|
||||
}
|
||||
|
||||
@Test
|
||||
void createGroup_withNullPermissions_savesGroupWithEmptyPermissionSet() {
|
||||
org.raddatz.familienarchiv.user.GroupDTO dto = new org.raddatz.familienarchiv.user.GroupDTO();
|
||||
|
||||
@@ -13,3 +13,18 @@ spring:
|
||||
password: test
|
||||
mail:
|
||||
host: localhost
|
||||
|
||||
# Disable OTel SDK entirely in tests — prevents auto-configuration from loading resource providers
|
||||
# (e.g. AzureAppServiceResourceProvider) that fail against the semconv version used here.
|
||||
otel:
|
||||
sdk:
|
||||
disabled: true
|
||||
|
||||
# Disable trace export in tests — prevents OTLP connection attempts when no Tempo is running.
|
||||
# Sampling probability 0.0 means no spans are created, so no export is attempted.
|
||||
management:
|
||||
server:
|
||||
port: 0 # random port per context — prevents TIME_WAIT conflicts when @DirtiesContext restarts the context
|
||||
tracing:
|
||||
sampling:
|
||||
probability: 0.0
|
||||
|
||||
2
backend/src/test/resources/application.properties
Normal file
2
backend/src/test/resources/application.properties
Normal file
@@ -0,0 +1,2 @@
|
||||
logging.level.root=WARN
|
||||
logging.level.org.raddatz=INFO
|
||||
266
docker-compose.observability.yml
Normal file
266
docker-compose.observability.yml
Normal file
@@ -0,0 +1,266 @@
|
||||
# Observability stack — Grafana LGTM + GlitchTip
|
||||
#
|
||||
# Requires the main stack to be running first:
|
||||
# docker compose up -d # creates archiv-net
|
||||
# docker compose -f docker-compose.observability.yml up -d
|
||||
#
|
||||
# To validate without starting:
|
||||
# docker compose -f docker-compose.observability.yml config
|
||||
|
||||
services:
|
||||
|
||||
# --- Metrics: Prometheus ---
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.0
|
||||
container_name: obs-prometheus
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./infra/observability/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
- prometheus_data:/prometheus
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
- '--storage.tsdb.path=/prometheus'
|
||||
- '--storage.tsdb.retention.time=30d'
|
||||
- '--web.enable-lifecycle'
|
||||
ports:
|
||||
- "127.0.0.1:${PORT_PROMETHEUS:-9090}:9090"
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "-qO-", "http://localhost:9090/-/healthy"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
networks:
|
||||
- archiv-net
|
||||
- obs-net
|
||||
|
||||
node-exporter:
|
||||
image: prom/node-exporter:v1.9.0
|
||||
container_name: obs-node-exporter
|
||||
restart: unless-stopped
|
||||
# pid: host — required for process-level CPU/memory metrics; cgroup isolation applies
|
||||
pid: host
|
||||
volumes:
|
||||
- /proc:/host/proc:ro
|
||||
- /sys:/host/sys:ro
|
||||
- /:/rootfs:ro
|
||||
command:
|
||||
- '--path.procfs=/host/proc'
|
||||
- '--path.sysfs=/host/sys'
|
||||
# $$ is YAML Compose escaping for a literal $ in the regex alternation
|
||||
- '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
|
||||
expose:
|
||||
- "9100"
|
||||
networks:
|
||||
- obs-net
|
||||
|
||||
cadvisor:
|
||||
image: gcr.io/cadvisor/cadvisor:v0.52.1
|
||||
container_name: obs-cadvisor
|
||||
restart: unless-stopped
|
||||
# privileged: true — required for cgroup and namespace metrics, see cAdvisor docs.
|
||||
# Accepted risk: cAdvisor is pinned, on Renovate, and not exposed outside obs-net.
|
||||
privileged: true
|
||||
volumes:
|
||||
- /:/rootfs:ro
|
||||
# /var/run/docker.sock mounted read-only — sufficient for container metadata discovery
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- /sys:/sys:ro
|
||||
- /var/lib/docker:/var/lib/docker:ro
|
||||
expose:
|
||||
- "8080"
|
||||
networks:
|
||||
- obs-net
|
||||
|
||||
# --- Logs: Loki + Promtail ---
|
||||
|
||||
loki:
|
||||
image: grafana/loki:3.4.2
|
||||
container_name: obs-loki
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./infra/observability/loki/loki-config.yml:/etc/loki/loki-config.yml:ro
|
||||
- loki_data:/loki
|
||||
command: -config.file=/etc/loki/loki-config.yml
|
||||
expose:
|
||||
- "3100"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:3100/ready | grep -q ready || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
networks:
|
||||
- obs-net
|
||||
|
||||
promtail:
|
||||
image: grafana/promtail:3.4.2
|
||||
container_name: obs-promtail
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./infra/observability/promtail/promtail-config.yml:/etc/promtail/promtail-config.yml:ro
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
# :ro restricts file-system access but NOT Docker API permissions — a compromised Promtail has full daemon access. Accepted risk on single-operator self-hosted archive.
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- promtail_positions:/tmp # persists positions.yaml across restarts — avoids duplicate log ingestion
|
||||
command: -config.file=/etc/promtail/promtail-config.yml
|
||||
networks:
|
||||
- archiv-net # label discovery from application containers via Docker socket
|
||||
- obs-net # log shipping to Loki
|
||||
depends_on:
|
||||
loki:
|
||||
condition: service_healthy
|
||||
|
||||
# --- Traces: Tempo ---
|
||||
|
||||
tempo:
|
||||
image: grafana/tempo:2.7.2
|
||||
container_name: obs-tempo
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./infra/observability/tempo/tempo.yml:/etc/tempo.yml:ro
|
||||
- tempo_data:/var/tempo
|
||||
command: -config.file=/etc/tempo.yml
|
||||
expose:
|
||||
- "3200" # Grafana queries Tempo on this port (obs-net only)
|
||||
- "4317" # OTLP gRPC — backend sends traces here (archiv-net)
|
||||
- "4318" # OTLP HTTP — alternative transport (archiv-net)
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:3200/ready | grep -q ready || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 15s
|
||||
networks:
|
||||
- archiv-net # backend (archive-backend) reaches tempo:4317 over this network
|
||||
- obs-net # Grafana reaches tempo:3200 over this network
|
||||
|
||||
# --- Dashboards: Grafana ---
|
||||
|
||||
obs-grafana:
|
||||
image: grafana/grafana-oss:11.6.1
|
||||
container_name: obs-grafana
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:${PORT_GRAFANA:-3003}:3000"
|
||||
environment:
|
||||
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-changeme}
|
||||
GF_USERS_ALLOW_SIGN_UP: "false"
|
||||
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL:-http://localhost:3003}
|
||||
volumes:
|
||||
- grafana_data:/var/lib/grafana
|
||||
- ./infra/observability/grafana/provisioning:/etc/grafana/provisioning:ro
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:3000/api/health | grep -q ok || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
prometheus:
|
||||
condition: service_healthy
|
||||
loki:
|
||||
condition: service_healthy
|
||||
tempo:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- obs-net
|
||||
|
||||
# --- Error Tracking: GlitchTip ---
|
||||
|
||||
obs-redis:
|
||||
image: redis:7-alpine
|
||||
container_name: obs-redis
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- glitchtip_data:/data
|
||||
expose:
|
||||
- "6379"
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- obs-net
|
||||
|
||||
obs-glitchtip:
|
||||
image: glitchtip/glitchtip:6.1.6
|
||||
container_name: obs-glitchtip
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
obs-redis:
|
||||
condition: service_healthy
|
||||
obs-glitchtip-db-init:
|
||||
condition: service_completed_successfully
|
||||
environment:
|
||||
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST:-archive-db}:5432/glitchtip
|
||||
REDIS_URL: redis://obs-redis:6379/0
|
||||
SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
|
||||
GLITCHTIP_DOMAIN: ${GLITCHTIP_DOMAIN:-http://localhost:3002}
|
||||
DEFAULT_FROM_EMAIL: ${APP_MAIL_FROM:-noreply@familienarchiv.local}
|
||||
EMAIL_URL: smtp://mailpit:1025
|
||||
GLITCHTIP_MAX_EVENT_LIFE_DAYS: 90
|
||||
ports:
|
||||
- "127.0.0.1:${PORT_GLITCHTIP:-3002}:8000"
|
||||
healthcheck:
|
||||
test: ["CMD", "bash", "-c", "echo > /dev/tcp/localhost/8000"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 60s
|
||||
networks:
|
||||
- archiv-net
|
||||
- obs-net
|
||||
|
||||
obs-glitchtip-worker:
|
||||
image: glitchtip/glitchtip:6.1.6
|
||||
container_name: obs-glitchtip-worker
|
||||
restart: unless-stopped
|
||||
command: ./bin/run-celery-with-beat.sh
|
||||
depends_on:
|
||||
obs-redis:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST:-archive-db}:5432/glitchtip
|
||||
REDIS_URL: redis://obs-redis:6379/0
|
||||
SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
|
||||
networks:
|
||||
- archiv-net
|
||||
- obs-net
|
||||
|
||||
obs-glitchtip-db-init:
|
||||
image: postgres:16-alpine
|
||||
container_name: obs-glitchtip-db-init
|
||||
restart: "no"
|
||||
environment:
|
||||
PGPASSWORD: ${POSTGRES_PASSWORD}
|
||||
command: >
|
||||
sh -c "psql -h ${POSTGRES_HOST:-archive-db} -U ${POSTGRES_USER} -tc
|
||||
\"SELECT 1 FROM pg_database WHERE datname = 'glitchtip'\" |
|
||||
grep -q 1 ||
|
||||
psql -h ${POSTGRES_HOST:-archive-db} -U ${POSTGRES_USER} -c \"CREATE DATABASE glitchtip;\""
|
||||
networks:
|
||||
- archiv-net
|
||||
|
||||
networks:
|
||||
# Shared network created by the main docker-compose.yml.
|
||||
# The observability stack joins as a peer so Prometheus can scrape
|
||||
# archive-backend by container name. The observability stack must NOT
|
||||
# attempt to create this network — it will fail with a clear error if
|
||||
# the main stack is not running yet.
|
||||
archiv-net:
|
||||
external: true
|
||||
|
||||
# Internal network for observability-service-to-service traffic
|
||||
# (e.g. Grafana → Prometheus, Grafana → Loki, Grafana → Tempo).
|
||||
obs-net:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
prometheus_data:
|
||||
loki_data:
|
||||
promtail_positions:
|
||||
tempo_data:
|
||||
grafana_data:
|
||||
glitchtip_data:
|
||||
@@ -39,6 +39,7 @@
|
||||
networks:
|
||||
archiv-net:
|
||||
driver: bridge
|
||||
name: ${COMPOSE_NETWORK_NAME:-archiv-net}
|
||||
|
||||
volumes:
|
||||
postgres-data:
|
||||
@@ -141,8 +142,11 @@ services:
|
||||
memswap_limit: ${OCR_MEM_LIMIT:-12g}
|
||||
volumes:
|
||||
- ocr-models:/app/models
|
||||
- ocr-cache:/root/.cache
|
||||
- ocr-cache:/app/cache # HuggingFace / ketos cache — prevents re-downloads on recreate (HF_HOME)
|
||||
environment:
|
||||
HF_HOME: /app/cache
|
||||
XDG_CACHE_HOME: /app/cache
|
||||
TORCH_HOME: /app/models/torch
|
||||
KRAKEN_MODEL_PATH: /app/models/german_kurrent.mlmodel
|
||||
TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
|
||||
OCR_CONFIDENCE_THRESHOLD: "0.3"
|
||||
@@ -160,6 +164,13 @@ services:
|
||||
timeout: 5s
|
||||
retries: 12
|
||||
start_period: 120s
|
||||
read_only: true
|
||||
tmpfs:
|
||||
- /tmp:size=512m # training endpoints write ZIPs to /tmp; 512 MB covers typical batches (20–50 images)
|
||||
cap_drop:
|
||||
- ALL
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
|
||||
backend:
|
||||
image: familienarchiv/backend:${TAG:-nightly}
|
||||
@@ -212,10 +223,15 @@ services:
|
||||
APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud}
|
||||
SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true}
|
||||
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true}
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4318
|
||||
OTEL_LOGS_EXPORTER: none
|
||||
OTEL_METRICS_EXPORTER: none
|
||||
MANAGEMENT_METRICS_TAGS_APPLICATION: Familienarchiv
|
||||
MANAGEMENT_TRACING_SAMPLING_PROBABILITY: ${MANAGEMENT_TRACING_SAMPLING_PROBABILITY:-0.1}
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:8081/actuator/health | grep -q UP || exit 1"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
|
||||
@@ -87,8 +87,11 @@ services:
|
||||
memswap_limit: 12g
|
||||
volumes:
|
||||
- ocr_models:/app/models
|
||||
- ocr_cache:/root/.cache # Hugging Face / ketos model download cache — prevents re-downloads on container recreate
|
||||
- ocr_cache:/app/cache # HuggingFace / ketos cache — prevents re-downloads on recreate (HF_HOME)
|
||||
environment:
|
||||
HF_HOME: /app/cache
|
||||
XDG_CACHE_HOME: /app/cache
|
||||
TORCH_HOME: /app/models/torch
|
||||
KRAKEN_MODEL_PATH: /app/models/german_kurrent.mlmodel
|
||||
TRAINING_TOKEN: "${OCR_TRAINING_TOKEN:-}"
|
||||
OCR_CONFIDENCE_THRESHOLD: "0.3"
|
||||
@@ -106,6 +109,13 @@ services:
|
||||
timeout: 5s
|
||||
retries: 12
|
||||
start_period: 120s
|
||||
read_only: true
|
||||
tmpfs:
|
||||
- /tmp:size=512m # training endpoints write ZIPs to /tmp; 512 MB covers typical batches (20–50 images)
|
||||
cap_drop:
|
||||
- ALL
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
|
||||
# --- Backend: Spring Boot ---
|
||||
backend:
|
||||
@@ -147,8 +157,20 @@ services:
|
||||
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-false}
|
||||
APP_OCR_BASE_URL: http://ocr-service:8000
|
||||
APP_OCR_TRAINING_TOKEN: "${OCR_TRAINING_TOKEN:-}"
|
||||
SENTRY_DSN: ${SENTRY_DSN:-}
|
||||
SENTRY_TRACES_SAMPLE_RATE: ${SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
# Observability: send traces to Tempo inside archiv-net (OTLP gRPC port 4317)
|
||||
# Tempo is defined in docker-compose.observability.yml (future issue).
|
||||
# OTLP failures are non-fatal — backend starts cleanly without the observability stack.
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4317
|
||||
# 10% sampling in this compose (dev + staging) — override locally to 1.0 if needed
|
||||
MANAGEMENT_TRACING_SAMPLING_PROBABILITY: "0.1"
|
||||
ports:
|
||||
- "${PORT_BACKEND}:8080"
|
||||
# Management port — Prometheus scrapes /actuator/prometheus from inside archiv-net.
|
||||
# Not exposed to the host; Docker service-name DNS (backend:8081) is sufficient.
|
||||
expose:
|
||||
- "8081"
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
|
||||
@@ -63,7 +63,7 @@ Members of the cross-cutting layer have no entity of their own, no user-facing C
|
||||
| `audit` | Append-only event store (`audit_log`) for all domain mutations. Feeds the activity feed and Family Pulse dashboard. | Consumed by 5+ domains; no user-facing CRUD of its own |
|
||||
| `config` | Infrastructure bean definitions: `MinioConfig`, `AsyncConfig`, `WebConfig` | Framework infra; no business logic |
|
||||
| `dashboard` | Stats aggregation for the admin dashboard and Family Pulse widget | Aggregates from 3+ domains; no owned entities |
|
||||
| `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service |
|
||||
| `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service. Adding a new `ErrorCode` requires matching updates in `frontend/src/lib/shared/errors.ts` and all three `messages/*.json` locale files. |
|
||||
| `filestorage` | `FileService` — MinIO/S3 upload, download, presigned-URL generation | Generic service; consumed by `document` and `ocr` |
|
||||
| `importing` | `MassImportService` — async ODS/Excel batch import | Orchestrates across `person`, `tag`, `document` |
|
||||
| `security` | `SecurityConfig`, `Permission` enum, `@RequirePermission` annotation, `PermissionAspect` (AOP) | Framework infra; enforced globally across all controllers |
|
||||
|
||||
@@ -19,6 +19,7 @@ This doc is the Day-1 checklist and operational reference. It links to the canon
|
||||
5. [Backup + recovery](#5-backup--recovery)
|
||||
6. [Common operational tasks](#6-common-operational-tasks)
|
||||
7. [Known limitations](#7-known-limitations)
|
||||
8. [Upgrade notes](#8-upgrade-notes)
|
||||
|
||||
---
|
||||
|
||||
@@ -43,6 +44,7 @@ graph TD
|
||||
- SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not.
|
||||
- The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy.
|
||||
- Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001).
|
||||
- An optional observability stack (Prometheus, Node Exporter, cAdvisor, Loki, Tempo, Grafana, GlitchTip) runs as a separate compose file. Configuration lives under `infra/observability/`. In production and CI, the stack is managed from `/opt/familienarchiv/` (CI copies it there on every nightly run) so bind mounts survive workspace wipes — see §4 for the ops procedure.
|
||||
|
||||
### OCR memory requirements
|
||||
|
||||
@@ -106,6 +108,12 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
|
||||
| `MAIL_SMTP_AUTH` | SMTP auth enabled | `false` (dev) | YES (prod) | — |
|
||||
| `MAIL_STARTTLS_ENABLE` | STARTTLS enabled | `false` (dev) | YES (prod) | — |
|
||||
| `SPRING_PROFILES_ACTIVE` | Spring profile | `dev,e2e` (compose) | YES | — |
|
||||
| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP HTTP endpoint for distributed traces (Tempo). Port 4318 = HTTP transport; port 4317 is gRPC-only and causes "Connection reset" with Spring Boot's HttpExporter. | `http://localhost:4318` | — | — |
|
||||
| `OTEL_LOGS_EXPORTER` | Disable OTLP log export — Promtail captures Docker logs via the logging driver; Tempo does not accept logs. | `none` | — | — |
|
||||
| `OTEL_METRICS_EXPORTER` | Disable OTLP metric export — Prometheus scrapes `/actuator/prometheus` via pull model; Tempo does not accept metrics. | `none` | — | — |
|
||||
| `MANAGEMENT_METRICS_TAGS_APPLICATION` | Common tag added to every Micrometer metric. Required by Grafana's Spring Boot Observability dashboard (ID 17175) `label_values(application)` template variable. | `Familienarchiv` | — | — |
|
||||
| `MANAGEMENT_TRACING_SAMPLING_PROBABILITY` | Micrometer tracing sample rate; overridden to `0.0` in test profile. | `0.1` (compose) / `1.0` (dev) | — | — |
|
||||
| `SENTRY_DSN` | GlitchTip / Sentry DSN for backend error reporting. Leave empty to disable the SDK. Set after GlitchTip first-run (§4). | — | — | YES |
|
||||
|
||||
### PostgreSQL container
|
||||
|
||||
@@ -133,6 +141,21 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
|
||||
| `KRAKEN_MODEL_PATH` | Directory containing Kraken HTR models (populated by `download-kraken-models.sh`) | `/app/models/` | — | — |
|
||||
| `BLLA_MODEL_PATH` | Kraken baseline layout analysis model path | `/app/models/blla.mlmodel` | — | — |
|
||||
| `OCR_MEM_LIMIT` | Container memory cap for ocr-service in `docker-compose.prod.yml`. Set to `6g` on CX32 hosts; leave unset on CX42+ to use the 12g default | `12g` (prod compose default) | — | — |
|
||||
| `XDG_CACHE_HOME` | XDG cache base dir — redirects Matplotlib and other XDG-aware libraries away from the read-only `HOME` (`/home/ocr`) to the writable cache volume | `/app/cache` | — | — |
|
||||
| `TORCH_HOME` | PyTorch model cache — redirects `~/.cache/torch` to the writable models volume | `/app/models/torch` | — | — |
|
||||
|
||||
### Observability stack (`docker-compose.observability.yml`)
|
||||
|
||||
| Variable | Purpose | Default | Required? | Sensitive? |
|
||||
|---|---|---|---|---|
|
||||
| `PORT_PROMETHEUS` | Host port for the Prometheus UI (bound to `127.0.0.1` only) | `9090` | — | — |
|
||||
| `PORT_GRAFANA` | Host port for the Grafana UI (bound to `127.0.0.1` only) | `3003` | — | — |
|
||||
| `POSTGRES_HOST` | PostgreSQL hostname for GlitchTip's db-init job and workers. Override when only the staging stack is running and `archive-db` is not resolvable by that name. | `archive-db` | — | — |
|
||||
| `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` user password | `changeme` | YES (prod) | YES |
|
||||
| `PORT_GLITCHTIP` | Host port for the GlitchTip UI (bound to `127.0.0.1` only) | `3002` | — | — |
|
||||
| `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (used in email links and CORS) | `http://localhost:3002` | YES (prod) | — |
|
||||
| `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` | — | YES | YES |
|
||||
| `VITE_SENTRY_DSN` | GlitchTip/Sentry DSN for the frontend (SvelteKit) — injected at build time via Vite. Leave empty to disable. Set after GlitchTip first-run (§4). | — | — | YES |
|
||||
|
||||
---
|
||||
|
||||
@@ -179,6 +202,29 @@ curl -fsSL https://tailscale.com/install.sh | sh && tailscale up
|
||||
# files to disk during execution (cleaned up unconditionally on completion).
|
||||
# A multi-tenant runner would need to switch to stdin-piped env files.
|
||||
# (See https://docs.gitea.com/usage/actions/quickstart for the register step.)
|
||||
|
||||
# Runner workspace directory — required for DooD bind-mount resolution (ADR-015).
|
||||
# act_runner stores job workspaces here so that docker compose bind mounts resolve
|
||||
# to real host paths. The path must be identical on the host and inside job containers.
|
||||
mkdir -p /srv/gitea-workspace
|
||||
# Observability config permanent directory — the nightly CI job copies
|
||||
# docker-compose.observability.yml and infra/observability/ here on every run.
|
||||
# The obs stack is always started from this path, not from the workspace.
|
||||
# See ADR-016 for why this directory is used instead of a server-pull approach.
|
||||
mkdir -p /opt/familienarchiv/infra
|
||||
# Both paths must also appear in the runner service volumes in ~/docker/gitea/compose.yaml:
|
||||
# volumes:
|
||||
# - /srv/gitea-workspace:/srv/gitea-workspace
|
||||
# /opt/familienarchiv does NOT need to be in the runner container's volumes — job
|
||||
# containers are spawned by the host daemon directly (DooD), so the host path is
|
||||
# accessible to them as long as runner-config.yaml lists it in valid_volumes + options.
|
||||
# See runner-config.yaml (workdir_parent + valid_volumes + options) and ADR-015/016.
|
||||
|
||||
# ⚠ IMPORTANT: after any change to runner-config.yaml (valid_volumes, options, workdir_parent),
|
||||
# restart the Gitea Act runner for the new config to take effect:
|
||||
# docker restart gitea-runner
|
||||
# Until restarted, job containers are spawned with the old config and any new bind mounts
|
||||
# (e.g. /opt/familienarchiv) will not be available inside job steps.
|
||||
```
|
||||
|
||||
### 3.2 DNS records
|
||||
@@ -209,6 +255,10 @@ git.raddatz.cloud A <server IP>
|
||||
| `MAIL_PORT` | release.yml | typically `587` |
|
||||
| `MAIL_USERNAME` | release.yml | SMTP user |
|
||||
| `MAIL_PASSWORD` | release.yml | SMTP password |
|
||||
| `GRAFANA_ADMIN_PASSWORD` | both | Grafana `admin` login — generate a strong password |
|
||||
| `GLITCHTIP_SECRET_KEY` | both | Django secret key — `openssl rand -hex 32` |
|
||||
| `SENTRY_DSN` | both | GlitchTip project DSN — set after first-run (§4); leave empty to keep Sentry disabled |
|
||||
| `VITE_SENTRY_DSN` | both | GlitchTip frontend project DSN — set after first-run (§4); leave empty to keep Sentry disabled |
|
||||
|
||||
### 3.4 First deploy
|
||||
|
||||
@@ -236,6 +286,9 @@ Before the first deploy: rotate `PROD_APP_ADMIN_PASSWORD` to a strong value. Aft
|
||||
|
||||
## 4. Logs + observability
|
||||
|
||||
> **Developer guide (where to look for what, LogQL queries, trace exploration) → [docs/OBSERVABILITY.md](./OBSERVABILITY.md).**
|
||||
> This section covers the ops side: starting the stack, env vars, and CI wiring.
|
||||
|
||||
### First-response commands
|
||||
|
||||
```bash
|
||||
@@ -256,9 +309,156 @@ docker compose logs --tail=200 <service>
|
||||
- **Spring Actuator health**: `http://localhost:8080/actuator/health` (internal only in prod — port 8081 for Prometheus scraping)
|
||||
- **Prometheus scraping**: management port 8081, path `/actuator/prometheus`. Internal only; Caddy blocks `/actuator/*` externally.
|
||||
|
||||
### Future observability
|
||||
### Observability stack
|
||||
|
||||
Phase 7 of the Production v1 milestone adds Prometheus + Loki + Grafana. No monitoring infrastructure is in place yet.
|
||||
An observability stack is available via `docker-compose.observability.yml`. Configuration lives under `infra/observability/`.
|
||||
|
||||
#### Dev — start from the workspace
|
||||
|
||||
```bash
|
||||
docker compose up -d # creates archiv-net
|
||||
docker compose -f docker-compose.observability.yml up -d
|
||||
```
|
||||
|
||||
#### Why the obs stack is managed differently from the main app stack
|
||||
|
||||
The main app stack (`docker-compose.prod.yml`) has no config-file bind mounts — its containers read config from env vars and image defaults. The workspace is wiped after each CI run but that does not affect running containers, because they hold no references to workspace paths.
|
||||
|
||||
The obs stack is different: `prometheus.yml`, `tempo.yml`, Loki config, Grafana provisioning files, and Promtail config are all bind-mounted from the host filesystem into their containers. If those source paths disappear (workspace wipe), the containers can restart fine until a `docker compose up` is run again — at that point Docker tries to re-resolve the bind-mount source and fails because the workspace path no longer exists.
|
||||
|
||||
The fix is to keep the obs compose file and config tree at a **permanent path** that CI copies to on every run but which survives between runs: `/opt/familienarchiv/` (see ADR-016).
|
||||
|
||||
#### Production — managed from `/opt/familienarchiv/`
|
||||
|
||||
Every CI run (nightly + release) copies `docker-compose.observability.yml` and `infra/observability/` to `/opt/familienarchiv/` before starting the stack. Bind mounts then resolve to `/opt/familienarchiv/infra/observability/…` — a stable path that outlasts any workspace wipe.
|
||||
|
||||
**Environment variables** follow the same two-source model as the main stack:
|
||||
|
||||
| Source | What it contains | Managed by |
|
||||
|---|---|---|
|
||||
| `infra/observability/obs.env` | All non-secret config (ports, URLs, hostnames) | Git — reviewed in PRs |
|
||||
| `/opt/familienarchiv/obs-secrets.env` | Passwords and secret keys only | CI — written fresh from Gitea secrets on every deploy |
|
||||
|
||||
Both files are passed explicitly via `--env-file` to the compose command, so there is no implicit auto-read `.env` and no operator-managed file to keep in sync.
|
||||
|
||||
**Non-secret config** (`infra/observability/obs.env`):
|
||||
|
||||
| Key | Value | Notes |
|
||||
|---|---|---|
|
||||
| `PORT_GRAFANA` | `3003` | Avoids collision with staging frontend on port 3001 |
|
||||
| `PORT_GLITCHTIP` | `3002` | |
|
||||
| `PORT_PROMETHEUS` | `9090` | |
|
||||
| `GF_SERVER_ROOT_URL` | `https://grafana.archiv.raddatz.cloud` | Required for alert email links and OAuth redirects |
|
||||
| `GLITCHTIP_DOMAIN` | `https://glitchtip.archiv.raddatz.cloud` | Must match the Caddy vhost |
|
||||
| `POSTGRES_HOST` | `archive-db` | Override if only the staging stack is running |
|
||||
|
||||
**Secret keys** (set in Gitea secrets, injected by CI into `obs-secrets.env`):
|
||||
|
||||
| Gitea secret | Notes |
|
||||
|---|---|
|
||||
| `GRAFANA_ADMIN_PASSWORD` | Strong unique password; shared by nightly and release |
|
||||
| `GLITCHTIP_SECRET_KEY` | `openssl rand -hex 32`; shared by nightly and release |
|
||||
| `STAGING_POSTGRES_PASSWORD` / `PROD_POSTGRES_PASSWORD` | Must match the running PostgreSQL container |
|
||||
|
||||
To start or restart the obs stack manually on the server (after CI has run at least once):
|
||||
|
||||
```bash
|
||||
docker compose \
|
||||
-f /opt/familienarchiv/docker-compose.observability.yml \
|
||||
--env-file /opt/familienarchiv/infra/observability/obs.env \
|
||||
--env-file /opt/familienarchiv/obs-secrets.env \
|
||||
up -d --wait --remove-orphans
|
||||
```
|
||||
|
||||
> **Note (manual ops only):** CI clears the destination with `rm -rf` before copying, so deleted files are removed automatically on the next run. If you copy manually with `cp -r` without first removing the directory, stale files from deleted configs will persist until cleaned up:
|
||||
> ```bash
|
||||
> rm /opt/familienarchiv/infra/observability/<path-to-removed-file>
|
||||
> ```
|
||||
|
||||
Current services:
|
||||
|
||||
| Service | Image | Purpose |
|
||||
|---|---|---|
|
||||
| `obs-prometheus` | `prom/prometheus:v3.4.0` | Scrapes metrics from backend management port 8081 (`/actuator/prometheus`), node-exporter, and cAdvisor |
|
||||
| `obs-node-exporter` | `prom/node-exporter:v1.9.0` | Host-level CPU / memory / disk / network metrics |
|
||||
| `obs-cadvisor` | `gcr.io/cadvisor/cadvisor:v0.52.1` | Per-container resource metrics |
|
||||
| `obs-loki` | `grafana/loki:3.4.2` | Log aggregation — receives log streams from Promtail. Port 3100 is `expose`-only (not host-bound). |
|
||||
| `obs-promtail` | `grafana/promtail:3.4.2` | Log shipping agent — reads all Docker container logs via the Docker socket and forwards them to Loki with `container_name`, `compose_service`, `compose_project`, and `job` labels. The `job` label is mapped from the Docker Compose service name (`com.docker.compose.service`) so that Grafana Loki dashboard queries (`{job="backend"}`, `{job="frontend"}`) work out of the box and the "App" variable dropdown is populated. |
|
||||
| `obs-tempo` | `grafana/tempo:2.7.2` | Distributed trace storage — OTLP HTTP receiver on port 4318 (`archiv-net`-internal; backend sends traces here). Grafana queries traces on port 3200 (`obs-net`-internal). All ports are `expose`-only (not host-bound). |
|
||||
| `obs-grafana` | `grafana/grafana-oss:11.6.1` | Unified observability UI — metrics dashboards, log exploration, trace viewer. Bound to `127.0.0.1:${PORT_GRAFANA:-3003}` on the host. |
|
||||
| `obs-glitchtip` | `glitchtip/glitchtip:6.1.6` | Sentry-compatible error tracker. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces. Bound to `127.0.0.1:${PORT_GLITCHTIP:-3002}`. |
|
||||
| `obs-glitchtip-worker` | `glitchtip/glitchtip:6.1.6` | Celery + beat worker — processes async GlitchTip tasks (event ingestion, notifications, cleanup). |
|
||||
| `obs-redis` | `redis:7-alpine` | Celery task broker for GlitchTip. Internal to `obs-net`; no host port exposed. |
|
||||
| `obs-glitchtip-db-init` | `postgres:16-alpine` | One-shot init container. Creates the `glitchtip` database on the existing `archive-db` PostgreSQL instance if it does not already exist. Runs at stack startup; exits cleanly once done. |
|
||||
|
||||
#### Grafana
|
||||
|
||||
| Item | Value |
|
||||
|---|---|
|
||||
| URL | `http://localhost:3003` (or `http://localhost:$PORT_GRAFANA`) |
|
||||
| Username | `admin` |
|
||||
| Password | `$GRAFANA_ADMIN_PASSWORD` (default: `changeme` — **change before exposing to a network**) |
|
||||
|
||||
Datasources are auto-provisioned on first start (Prometheus, Loki, Tempo — no manual setup required). Three dashboards are pre-loaded:
|
||||
|
||||
| Dashboard | Grafana ID | Purpose |
|
||||
|---|---|---|
|
||||
| Node Exporter Full | 1860 | Host CPU, memory, disk, network |
|
||||
| Spring Boot Observability | 17175 | JVM metrics, HTTP latency, error rate |
|
||||
| Loki Logs | 13639 | Log exploration and filtering |
|
||||
|
||||
Tempo traces are accessible via Grafana Explore → Tempo datasource, and linked from Loki logs via the `traceId` derived field.
|
||||
|
||||
**Loki quick checks** (after ~60 s, run from inside the `obs-loki` container):
|
||||
|
||||
```bash
|
||||
# Loki health
|
||||
docker exec obs-loki wget -qO- http://localhost:3100/ready
|
||||
|
||||
# List labels
|
||||
docker exec obs-loki wget -qO- 'http://localhost:3100/loki/api/v1/labels'
|
||||
|
||||
# Query logs by service (stable across dev and prod environments)
|
||||
docker exec obs-loki wget -qO- \
|
||||
'http://localhost:3100/loki/api/v1/query_range?query=%7Bcompose_service%3D%22backend%22%7D&limit=5'
|
||||
```
|
||||
|
||||
**Prefer `compose_service` over `container_name` in LogQL queries** — `container_name` differs between dev (`archive-backend`) and prod (`archiv-production-backend-1`), while `compose_service` is stable (`backend`, `db`, `minio`, etc.).
|
||||
|
||||
Prometheus port `9090` and Grafana port `3003` (default; configurable via `PORT_GRAFANA`) are bound to `127.0.0.1` on the host. No other observability ports are host-bound.
|
||||
|
||||
#### GlitchTip
|
||||
|
||||
| Item | Value |
|
||||
|---|---|
|
||||
| URL | `http://localhost:3002` (or `http://localhost:$PORT_GLITCHTIP`) |
|
||||
|
||||
**Required env vars** — set in `.env` before first start:
|
||||
|
||||
```bash
|
||||
GLITCHTIP_SECRET_KEY=$(python3 -c "import secrets; print(secrets.token_hex(32))")
|
||||
GLITCHTIP_DOMAIN=http://localhost:3002 # change to your public URL in prod
|
||||
PORT_GLITCHTIP=3002 # optional, defaults to 3002
|
||||
```
|
||||
|
||||
**Database:** GlitchTip shares the existing `archive-db` PostgreSQL instance. The `obs-glitchtip-db-init` one-shot container creates a dedicated `glitchtip` database on first stack start — no manual step required.
|
||||
|
||||
**First-run steps** (one-time, after `docker compose -f docker-compose.observability.yml up -d`):
|
||||
|
||||
```bash
|
||||
# 1. Create the Django superuser (interactive)
|
||||
docker exec -it obs-glitchtip ./manage.py createsuperuser
|
||||
|
||||
# 2. Open the GlitchTip UI and log in
|
||||
open http://localhost:3002
|
||||
|
||||
# 3. Create an organisation (e.g. "Familienarchiv")
|
||||
# 4. Create two projects:
|
||||
# - "familienarchiv-frontend" (platform: JavaScript / SvelteKit)
|
||||
# - "familienarchiv-backend" (platform: Java / Spring Boot)
|
||||
# 5. Copy each project's DSN from Settings → Projects → <project> → Client Keys
|
||||
# 6. Wire the DSNs into the backend and frontend via env vars (separate issue)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -357,3 +557,28 @@ bash scripts/download-kraken-models.sh
|
||||
| **No multi-region** | Single PostgreSQL + MinIO instance; no replication or failover | Deliberate scope decision |
|
||||
| **Max upload size** | 50 MB per file (500 MB per request for multi-file) | Configurable in `application.yaml` (`spring.servlet.multipart`) |
|
||||
| **No automated backup** | Phase 5 of Production v1 milestone is not yet implemented | See §5 above |
|
||||
|
||||
---
|
||||
|
||||
## 8. Upgrade notes
|
||||
|
||||
Version-specific one-time steps that must be run before or after upgrading to a given release. Each subsection is safe to skip on a fresh install.
|
||||
|
||||
### Upgrading to PR #611 — non-root OCR container
|
||||
|
||||
The OCR cache volume path changed from `/root/.cache` to `/app/cache` (PR #611 — CIS Docker §4.1 hardening). The existing volume was written as root and is inaccessible to the new non-root `ocr` user, causing a `PermissionError` on startup.
|
||||
|
||||
**Before starting the updated container stack**, drop the old root-owned volume. The volume name depends on the compose project name:
|
||||
|
||||
```bash
|
||||
# Dev (docker-compose.yml — project name: familienarchiv)
|
||||
docker volume rm familienarchiv_ocr_cache
|
||||
|
||||
# Production (docker-compose.prod.yml -p archiv-production)
|
||||
docker volume rm archiv-production_ocr-cache
|
||||
|
||||
# Staging (docker-compose.prod.yml -p archiv-staging)
|
||||
docker volume rm archiv-staging_ocr-cache
|
||||
```
|
||||
|
||||
The volume is recreated automatically on `docker compose up`. The OCR service will re-download its model cache on first startup (approximately 1–2 GB, one-time cost).
|
||||
|
||||
180
docs/OBSERVABILITY.md
Normal file
180
docs/OBSERVABILITY.md
Normal file
@@ -0,0 +1,180 @@
|
||||
# Observability Guide
|
||||
|
||||
> **Ops reference (starting the stack, env vars, CI wiring) → [DEPLOYMENT.md §4](./DEPLOYMENT.md#4-logs--observability).**
|
||||
> This file is for developers: what signal lives where, how to reach it, and what to look for.
|
||||
|
||||
## Where to look for what
|
||||
|
||||
| I want to… | Go to |
|
||||
|---|---|
|
||||
| See the last N log lines from the backend | `docker compose logs --tail=100 backend` |
|
||||
| Search logs by keyword across time | Grafana → Explore → Loki |
|
||||
| Understand why an HTTP request failed | Grafana → Explore → Loki → filter by `traceId` → follow link to Tempo |
|
||||
| See a full distributed trace (DB queries, HTTP calls) | Grafana → Explore → Tempo → search by service or trace ID |
|
||||
| Check JVM heap / GC / thread count | Grafana → Dashboards → Spring Boot Observability |
|
||||
| Check HTTP error rate or p95 latency | Grafana → Dashboards → Spring Boot Observability |
|
||||
| Check host CPU / memory / disk | Grafana → Dashboards → Node Exporter Full |
|
||||
| See grouped application errors with stack traces | GlitchTip |
|
||||
| Check if the backend is healthy | `curl http://localhost:8081/actuator/health` (on the server) |
|
||||
| Check what Prometheus is scraping | `curl http://localhost:9090/api/v1/targets` (on the server) |
|
||||
|
||||
## Access
|
||||
|
||||
| Tool | External URL | Who it's for |
|
||||
|---|---|---|
|
||||
| Grafana | `https://grafana.archiv.raddatz.cloud` | Logs, metrics, traces — the primary observability UI |
|
||||
| GlitchTip | `https://glitchtip.archiv.raddatz.cloud` | Grouped errors with stack traces and release tracking |
|
||||
|
||||
Loki, Tempo, and Prometheus have no external URL. They are internal services, accessible only through Grafana (or via SSH tunnel — see below).
|
||||
|
||||
## Logs (Loki)
|
||||
|
||||
Logs reach Loki via Promtail, which reads all Docker container logs from the Docker socket and ships them with labels derived from Docker Compose metadata.
|
||||
|
||||
### Labels available in every log line
|
||||
|
||||
| Label | What it contains | Example |
|
||||
|---|---|---|
|
||||
| `job` | Compose service name | `backend`, `frontend`, `db` |
|
||||
| `compose_service` | Same as `job` | `backend` |
|
||||
| `compose_project` | Compose project name | `archiv-staging`, `archiv-production` |
|
||||
| `container_name` | Docker container name | `archiv-staging-backend-1` |
|
||||
| `filename` | Docker log source | `/var/lib/docker/containers/…` |
|
||||
|
||||
**Use `job` in LogQL queries** — it is stable across dev, staging, and production. `container_name` changes between environments.
|
||||
|
||||
### Common LogQL queries in Grafana Explore
|
||||
|
||||
```logql
|
||||
# All backend logs
|
||||
{job="backend"}
|
||||
|
||||
# Backend ERROR and WARN lines only
|
||||
{job="backend"} |= "ERROR" or {job="backend"} |= "WARN"
|
||||
|
||||
# All logs for a specific request (paste a traceId from a log line)
|
||||
{job="backend"} |= "3fa85f64-5717-4562-b3fc-2c963f66afa6"
|
||||
|
||||
# Log lines containing a specific exception class
|
||||
{job="backend"} |~ "DomainException|NullPointerException"
|
||||
|
||||
# Frontend logs
|
||||
{job="frontend"}
|
||||
|
||||
# Database (slow query log, if enabled)
|
||||
{job="db"}
|
||||
```
|
||||
|
||||
### Log → Trace correlation
|
||||
|
||||
Spring Boot writes the active `traceId` into every log line when a request is being processed:
|
||||
|
||||
```
|
||||
2026-05-16 ... INFO [Familienarchiv,3fa85f64...,1b2c3d4e] o.r.f.document.DocumentService : ...
|
||||
```
|
||||
|
||||
In Grafana Explore → Loki, log lines with a `traceId` field show a **Tempo** link. Clicking it opens the full trace in Explore → Tempo without copying and pasting IDs.
|
||||
|
||||
This linking is configured in the Loki datasource provisioning via the `traceId` derived field regex. No manual setup required.
|
||||
|
||||
## Traces (Tempo)
|
||||
|
||||
The backend sends traces to Tempo via OTLP HTTP (port 4318). Every inbound HTTP request and every JPA query produces a span. Spans are linked into traces by the propagated `traceId` header.
|
||||
|
||||
### Finding a trace in Grafana
|
||||
|
||||
**Option A — from a log line:**
|
||||
1. Grafana → Explore → select *Loki* datasource
|
||||
2. Query `{job="backend"}` and find the failing request
|
||||
3. Click the **Tempo** link in the log line (appears when `traceId` is present)
|
||||
|
||||
**Option B — by service:**
|
||||
1. Grafana → Explore → select *Tempo* datasource
|
||||
2. Query type: **Search**
|
||||
3. Service name: `familienarchiv-backend`
|
||||
4. Filter by HTTP status, duration, or operation name as needed
|
||||
|
||||
**Option C — by trace ID:**
|
||||
1. Grafana → Explore → select *Tempo* datasource
|
||||
2. Query type: **TraceQL** or **Trace ID**
|
||||
3. Paste the trace ID
|
||||
|
||||
### What each span type tells you
|
||||
|
||||
| Root span name pattern | What it covers |
|
||||
|---|---|
|
||||
| `GET /api/documents`, `POST /api/documents` | Full HTTP request lifecycle |
|
||||
| `SELECT archiv.*` | A single JPA/JDBC query inside that request |
|
||||
| `HikariPool.getConnection` | Connection pool wait time |
|
||||
|
||||
A slow `SELECT` span inside an otherwise fast HTTP span pinpoints a missing index. A slow `HikariPool.getConnection` span indicates connection pool exhaustion.
|
||||
|
||||
### Sampling rate
|
||||
|
||||
- **Dev**: 100% of requests are traced (`management.tracing.sampling.probability: 1.0` in `application.yaml`)
|
||||
- **Staging / Production**: 10% (`MANAGEMENT_TRACING_SAMPLING_PROBABILITY=0.1` in `docker-compose.prod.yml`)
|
||||
|
||||
To find a trace for a specific request in staging/production, either increase the sampling rate temporarily or trigger the request multiple times.
|
||||
|
||||
## Metrics (Prometheus → Grafana)
|
||||
|
||||
Prometheus scrapes the backend management endpoint every 15 s:
|
||||
|
||||
```
|
||||
Target: backend:8081/actuator/prometheus
|
||||
Labels: job="spring-boot", application="Familienarchiv"
|
||||
```
|
||||
|
||||
All Spring Boot metrics carry the `application="Familienarchiv"` tag, which is how the Grafana Spring Boot Observability dashboard (ID 17175) filters to this service.
|
||||
|
||||
### Useful Prometheus queries (run on the server or via Grafana Explore → Prometheus)
|
||||
|
||||
```promql
|
||||
# HTTP error rate (5xx) as a fraction of all requests
|
||||
sum(rate(http_server_requests_seconds_count{status=~"5.."}[5m]))
|
||||
/ sum(rate(http_server_requests_seconds_count[5m]))
|
||||
|
||||
# p95 response time
|
||||
histogram_quantile(0.95, sum by (le) (
|
||||
rate(http_server_requests_seconds_bucket[5m])
|
||||
))
|
||||
|
||||
# JVM heap used
|
||||
jvm_memory_used_bytes{area="heap", application="Familienarchiv"}
|
||||
|
||||
# Active DB connections
|
||||
hikaricp_connections_active
|
||||
```
|
||||
|
||||
## Errors (GlitchTip)
|
||||
|
||||
GlitchTip receives errors from both the backend (via Sentry Java SDK) and the frontend (via Sentry JavaScript SDK). It groups events by fingerprint, tracks first/last seen times, and links to the release that introduced the error.
|
||||
|
||||
GlitchTip complements Loki: use GlitchTip when you need **grouped, de-duplicated errors with stack traces and release attribution**; use Loki when you need **raw log lines with full context** or want to search across all log levels.
|
||||
|
||||
## Direct API access (debugging only)
|
||||
|
||||
Loki and Tempo bind no host ports. To reach them directly from your laptop, use an SSH tunnel through the server:
|
||||
|
||||
```bash
|
||||
# Loki API on localhost:3100 (then query via curl or logcli)
|
||||
ssh -L 3100:172.20.0.x:3100 root@raddatz.cloud
|
||||
# Replace 172.20.0.x with the obs-loki container IP:
|
||||
# docker inspect obs-loki --format '{{.NetworkSettings.Networks.archiv-obs-net.IPAddress}}'
|
||||
|
||||
# Tempo API on localhost:3200 (then query via curl or tempo-cli)
|
||||
ssh -L 3200:172.20.0.x:3200 root@raddatz.cloud
|
||||
```
|
||||
|
||||
In practice, Grafana Explore covers all common debugging workflows without needing direct API access.
|
||||
|
||||
## Signal summary
|
||||
|
||||
| Signal | Source | Transport | Storage | UI |
|
||||
|---|---|---|---|---|
|
||||
| Application logs | Spring Boot stdout → Docker log driver | Promtail → Loki push API | Loki | Grafana Explore → Loki |
|
||||
| Distributed traces | Spring Boot OTel agent | OTLP HTTP → Tempo:4318 | Tempo | Grafana Explore → Tempo |
|
||||
| JVM + HTTP metrics | Spring Actuator `/actuator/prometheus` | Prometheus pull (15 s) | Prometheus | Grafana dashboards |
|
||||
| Host metrics | node-exporter | Prometheus pull | Prometheus | Grafana → Node Exporter Full |
|
||||
| Container metrics | cAdvisor | Prometheus pull | Prometheus | Grafana (via Prometheus datasource) |
|
||||
| Application errors | Sentry SDK | HTTP POST → GlitchTip ingest | GlitchTip DB | GlitchTip UI |
|
||||
92
docs/adr/013-client-branches-coverage-threshold.md
Normal file
92
docs/adr/013-client-branches-coverage-threshold.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# ADR 013 — Client-Project Branch Coverage Threshold
|
||||
|
||||
**Status:** Accepted
|
||||
**Date:** 2026-05-14
|
||||
**Issues:** [#556 — threshold drop](https://git.raddatz.cloud/marcel/familienarchiv/issues/556) · [#496 — long-tail-grind tracking](https://git.raddatz.cloud/marcel/familienarchiv/issues/496)
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
The browser-mode component test suite (`vitest.client-coverage.config.ts`) enforces Istanbul coverage thresholds across `lines`, `functions`, `branches`, and `statements`. The `branches` metric was set to 80%, but the codebase sits at **75%** — below the gate — causing every CI run of `unit-tests` and `coverage-flake-probe` to fail on this check alone, even when all tests are green.
|
||||
|
||||
**Measured baseline (2026-05-14, branch `feat/issue-553-birpc-async-mock-factory`, head `2e6cc346`):**
|
||||
|
||||
```
|
||||
branches: 75% (below the 80% gate — reason for this ADR)
|
||||
lines: ≥ 80%
|
||||
functions: ≥ 80%
|
||||
statements: ≥ 80%
|
||||
```
|
||||
|
||||
Reproducer:
|
||||
|
||||
```bash
|
||||
cd frontend && npm ci && npx vitest run -c vitest.client-coverage.config.ts --coverage
|
||||
```
|
||||
|
||||
### The long-tail-grind problem
|
||||
|
||||
In Istanbul's branch accounting, when a child component gains test coverage its branches are added to the parent's denominator. A child moving from 40% → 80% coverage can drag a parent from 78% → 72% because more branches in the call graph become reachable and must be covered. This is not a bug — it is how branch accounting works — but it means that on a large SvelteKit application the denominator grows with every coverage improvement, making an arbitrary 80% ceiling a constant grind. Per #496, the expected cost to reach 80% branches from 75% is 30–100+ commits with no guarantee of stability.
|
||||
|
||||
### Why this layer is different
|
||||
|
||||
The 80% branch floor used for backend unit/integration tests is appropriate for Java service code and permission logic. Browser-mode component coverage measures Svelte template branches: conditional class bindings, `{#if}` blocks, empty/loaded/error state guards. These branches have a fundamentally different accounting model and a higher inherent denominator. This ADR **only** lowers the browser-mode component gate; the backend test coverage gates are unaffected.
|
||||
|
||||
### Security-relevant uncovered components
|
||||
|
||||
The following auth/permission-boundary components currently have low or zero branch coverage. When ratchet-up work begins (see below), these are the highest-priority targets:
|
||||
|
||||
- `src/routes/login/+page.svelte`
|
||||
- `src/routes/forgot-password/+page.svelte`
|
||||
- `src/routes/reset-password/+page.svelte`
|
||||
- `src/routes/register/+page.svelte`
|
||||
|
||||
Note: the 75% figure already reflects the absence of coverage on these files. Lowering the gate does not create this gap — it makes the existing state legible.
|
||||
|
||||
---
|
||||
|
||||
## Decision
|
||||
|
||||
Drop the `branches` threshold from `80` → `75` in `frontend/vitest.client-coverage.config.ts`. Leave `lines`, `functions`, and `statements` at `80`.
|
||||
|
||||
The 75% figure matches the measured current state, allowing CI to pass while deliberate coverage improvement work (tracked in #496) continues without blocking other PRs. The asymmetry in the thresholds block is intentional and documented with an inline comment pointing here.
|
||||
|
||||
---
|
||||
|
||||
## Ratchet Rule
|
||||
|
||||
The branches threshold ratchets **up by 3 percentage points** when the rolling 3-PR-average client-project branches figure on `main` stays at or above `threshold + 3pp` for ≥ 30 consecutive days. Direction is **up-only** — never lower the floor below 75 without a new ADR superseding this one. Manual today (verify before any `vitest.client-coverage.config.ts` edit); a future automation issue may codify the check.
|
||||
|
||||
Concretely:
|
||||
- When `main` sustains ≥ 78% branches across 3 consecutive PRs for 30 days → raise gate to 78%
|
||||
- When `main` sustains ≥ 81% branches across 3 consecutive PRs for 30 days → raise gate back to 80%
|
||||
|
||||
---
|
||||
|
||||
## Non-goals
|
||||
|
||||
- **Not** raising actual branch coverage — that is #496's job, tracked separately.
|
||||
- **Not** touching the server-project coverage configuration (`vitest.config.ts`) — only the client project hits the long-tail-grind pattern.
|
||||
- **Not** removing or relaxing any existing test files, `skipIf` guards, or axe-playwright accessibility runs.
|
||||
|
||||
---
|
||||
|
||||
## Consequences
|
||||
|
||||
**Easier:**
|
||||
- CI unblocked — `unit-tests` and `coverage-flake-probe` jobs pass when all tests are green
|
||||
- The ratchet rule creates a concrete, observable path back to 80%
|
||||
|
||||
**Harder:**
|
||||
- The gate now has near-zero headroom — any branch regression that drops below 75% will fail CI immediately
|
||||
- The 75% floor must not be treated as a permanent ceiling; the ratchet discipline requires active attention
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- [#496 — Branch coverage long-tail grind](https://git.raddatz.cloud/marcel/familienarchiv/issues/496)
|
||||
- [#556 — This threshold drop](https://git.raddatz.cloud/marcel/familienarchiv/issues/556)
|
||||
- [ADR 012 — Browser-Mode Test Mocking Strategy](./012-browser-test-mocking-strategy.md)
|
||||
- `frontend/vitest.client-coverage.config.ts` — thresholds block (lines 44–51)
|
||||
122
docs/adr/014-upload-artifact-v3-pin.md
Normal file
122
docs/adr/014-upload-artifact-v3-pin.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# ADR 014 — Pin actions/upload-artifact to v3 (Gitea act_runner v4 protocol incompatibility)
|
||||
|
||||
**Status:** Accepted
|
||||
**Date:** 2026-05-14
|
||||
**Issues:** [#557 — re-regression](https://git.raddatz.cloud/marcel/familienarchiv/issues/557) · [#14 — original incident](https://git.raddatz.cloud/marcel/familienarchiv/issues/14)
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
`actions/upload-artifact` is available in two incompatible major versions. The v4 client
|
||||
uploads via a GitHub-specific artifact API that is **not implemented** in Gitea's
|
||||
`act_runner` (the self-hosted CI substrate established by ADR-011). When a workflow step
|
||||
uses `actions/upload-artifact@v4` on this runner, `act_runner` returns a non-zero exit
|
||||
code from the v4 client even when all tests pass, producing:
|
||||
|
||||
> green test suite — red job status — no artifact uploaded
|
||||
|
||||
The failure lands in the upload step, _after_ the test output, making it hard to diagnose
|
||||
from the build log.
|
||||
|
||||
### Incident history
|
||||
|
||||
| Date | Commit | Event |
|
||||
|---|---|---|
|
||||
| 2026-03-19 | `9f3f022e` | Original downgrade: `upload-artifact@v4 → v3` |
|
||||
| 2026-03-19 | `4142c7cd` | Rationale committed; closes #14 |
|
||||
| 2026-05-05 | `410b91e2` | Re-regression: upgraded back to v4 without referencing #14 |
|
||||
| 2026-05-14 | this PR | Second downgrade + ADR + grep guard |
|
||||
|
||||
The root cause of the re-regression was institutional-memory failure: the original
|
||||
rationale was captured only in a commit body, invisible at the point of change (the
|
||||
`uses:` line). This ADR, the inline comments, and the grep guard are the three
|
||||
defence layers that replace that missing breadcrumb.
|
||||
|
||||
---
|
||||
|
||||
## Decision
|
||||
|
||||
**Pin all `actions/upload-artifact` and `actions/download-artifact` call sites to `@v3`.**
|
||||
|
||||
Both action families share the same v4 protocol incompatibility with `act_runner`.
|
||||
Pinning to the major tag (`@v3`) keeps us on the latest v3 patch without Renovate noise.
|
||||
|
||||
Three call sites are pinned:
|
||||
- `.gitea/workflows/ci.yml` — "Upload coverage reports" step
|
||||
- `.gitea/workflows/ci.yml` — "Upload screenshots" step
|
||||
- `.gitea/workflows/coverage-flake-probe.yml` — "Upload coverage log on failure" step
|
||||
|
||||
Each pinned `uses:` line carries a load-bearing inline comment:
|
||||
|
||||
```yaml
|
||||
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
|
||||
- uses: actions/upload-artifact@v3
|
||||
```
|
||||
|
||||
A CI grep guard enforces the constraint automatically (see below).
|
||||
|
||||
---
|
||||
|
||||
## Consequences
|
||||
|
||||
### Enforcement layers (defence in depth)
|
||||
|
||||
1. **Inline comments** on every `uses:` line — visible at the point of change.
|
||||
2. **CI grep guard** in `.gitea/workflows/ci.yml` ("Assert no (upload|download)-artifact
|
||||
past v3") — fails the build if a future commit re-introduces `@v4` or higher on any
|
||||
workflow file. Anchored to YAML `uses:` lines to avoid false positives on embedded
|
||||
shell strings. Includes a self-test that proves the regex catches v4+ before scanning
|
||||
the repo.
|
||||
3. **This ADR** — canonical rationale; cross-referenced by comments and guard message.
|
||||
|
||||
### How to spot the symptom
|
||||
|
||||
- Test suite output shows green (vitest, surefire, pytest all exit 0)
|
||||
- CI job status shows red
|
||||
- Artifacts section of the run is empty
|
||||
- Build log shows a non-zero exit from the `Upload …` step immediately after green tests
|
||||
|
||||
### `@v3` maintenance-mode status
|
||||
|
||||
GitHub placed `actions/upload-artifact@v3` in maintenance mode (no new features) but it
|
||||
has not been removed and carries no known unpatched CVE as of this writing. If GitHub
|
||||
publishes a v3-specific security advisory, that is an additional trigger to re-evaluate
|
||||
(see upgrade conditions below).
|
||||
|
||||
### When to remove this pin
|
||||
|
||||
Re-evaluate pinning **when either condition is met:**
|
||||
|
||||
1. `gitea/act_runner` ships a release with v4 artifact protocol support. Track upstream:
|
||||
<https://gitea.com/gitea/act_runner>
|
||||
2. `actions/upload-artifact@v3` acquires an unpatched CVE that cannot be mitigated
|
||||
at the runner level.
|
||||
|
||||
When upgrading: remove the grep guard step, update all three `uses:` lines, remove the
|
||||
inline comments, and update this ADR's status to Superseded.
|
||||
|
||||
---
|
||||
|
||||
## Alternatives
|
||||
|
||||
### SHA pinning (`uses: actions/upload-artifact@<sha>`)
|
||||
|
||||
More secure against action repository compromise, but adds Renovate update friction
|
||||
and is disproportionate for a self-hosted, single-tenant Gitea instance with one
|
||||
trusted contributor (ADR-011). Rejected.
|
||||
|
||||
### Minor/patch pinning (`@v3.4.0`)
|
||||
|
||||
Avoids Renovate PRs but freezes us on a specific patch. The v3 major track is in
|
||||
maintenance mode — minor pinning has no benefit and would require manual updates
|
||||
for any v3 security patches. Rejected.
|
||||
|
||||
### Renovate `packageRules` bypass
|
||||
|
||||
Would prevent automated PRs from proposing v4. Not needed while Renovate is not
|
||||
configured for this repository. Revisit if Renovate is introduced.
|
||||
|
||||
### Migrating the runner to a v4-compatible Gitea release
|
||||
|
||||
Out of scope for this issue. A separate decision; tracked in #557's non-goals.
|
||||
69
docs/adr/015-dood-workspace-bind-mount.md
Normal file
69
docs/adr/015-dood-workspace-bind-mount.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# ADR-015: DooD workspace bind mount for Compose file bind-mount resolution
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) run job steps inside Docker containers via Docker-out-of-Docker (DooD): the Gitea runner mounts the host Docker socket, and act_runner spawns sibling containers for each job.
|
||||
|
||||
When a job step calls `docker compose -f docker-compose.observability.yml up`, Docker Compose resolves relative bind-mount sources against `$(pwd)` inside the job container and passes the resulting absolute paths to the **host** daemon. For example, `./infra/observability/prometheus/prometheus.yml` becomes `/some/path/infra/observability/prometheus/prometheus.yml`, and the host daemon tries to bind-mount that path from the **host filesystem**.
|
||||
|
||||
In the default DooD setup (`runner-config.yaml` with only `valid_volumes: ["/var/run/docker.sock"]`), job container workspaces live in the act_runner overlay2 layer. The host has no corresponding directory at the job container's `$(pwd)` path, so the daemon auto-creates an empty directory in its place. The container then fails to start because the mount target was expected to be a file, not a directory:
|
||||
|
||||
```
|
||||
error mounting "…/prometheus/prometheus.yml" to rootfs at "/etc/prometheus/prometheus.yml": not a directory
|
||||
```
|
||||
|
||||
This affected all five config file bind mounts in `docker-compose.observability.yml`.
|
||||
|
||||
## Decision
|
||||
|
||||
Configure act_runner to store job workspaces on a real host path (`/srv/gitea-workspace`) and mount that path into both the runner container and every job container at the **same absolute path**. The identity of the host path and container path is the key constraint: Compose resolves to an absolute path and hands it to the host daemon, which looks for that exact path on the host filesystem.
|
||||
|
||||
**runner-config.yaml changes:**
|
||||
|
||||
```yaml
|
||||
container:
|
||||
workdir_parent: /srv/gitea-workspace
|
||||
valid_volumes:
|
||||
- "/var/run/docker.sock"
|
||||
- "/srv/gitea-workspace"
|
||||
options: "-v /srv/gitea-workspace:/srv/gitea-workspace"
|
||||
```
|
||||
|
||||
**Runner compose.yaml change** (host side — not in this repo):
|
||||
|
||||
```yaml
|
||||
runner:
|
||||
volumes:
|
||||
- /srv/gitea-workspace:/srv/gitea-workspace
|
||||
```
|
||||
|
||||
With this in place, `$(pwd)` inside a job container resolves to `/srv/gitea-workspace/<owner>/<repo>/`, which is a real directory on the host. Compose-managed bind mounts from that directory work without any additional steps.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
| Alternative | Why rejected |
|
||||
|---|---|
|
||||
| **overlay2 `MergedDir` sync via privileged nsenter** (the previous approach, see PR #599 v1) | Required `--privileged --pid=host` (effective root on the host) plus fragile overlay2 driver assumption. Introduced stale-file risk on the host and a second stable path (`/srv/familienarchiv-*/obs-configs`) to maintain separately from the source tree. Replaced by this ADR. |
|
||||
| **Build configs into a dedicated Docker image** (pattern used for MinIO bootstrap, see `infra/minio/Dockerfile`) | Viable for static files that change infrequently. Requires a build step and an image rebuild every time a config changes. Appropriate for bootstrap scripts; too heavy for frequently-tuned observability configs. |
|
||||
| **Add workspace directory to runner-config `valid_volumes` only** (without `workdir_parent`) | `valid_volumes` whitelists paths that workflow steps may reference, but does not change where act_runner stores workspaces. Without `workdir_parent`, the workspace would still be in overlay2 and the bind-mount resolution problem would remain. |
|
||||
| **Map workspace under a different host path than container path** (e.g. host `/srv/workspace`, container `/workspace`) | Compose resolves to the container-internal path (e.g. `/workspace/…`) and passes that to the host daemon. The host daemon interprets the source as a host path. If host `/workspace` does not exist, the daemon creates an empty directory — the original bug. The paths must be identical. |
|
||||
|
||||
## Consequences
|
||||
|
||||
- `/srv/gitea-workspace` must exist on the VPS before the runner starts. The directory was created as part of this change; it is not created automatically.
|
||||
- The runner container's `compose.yaml` (maintained outside this repo at `~/docker/gitea/compose.yaml` on the VPS) must include the `- /srv/gitea-workspace:/srv/gitea-workspace` volume line. This is an out-of-band operational dependency; the prerequisite is documented in `runner-config.yaml`.
|
||||
- `workdir_parent` applies to all jobs on this runner. Any future workflow that calls `docker compose` with relative bind mounts benefits automatically without further configuration.
|
||||
- Job workspaces persist across runs under `/srv/gitea-workspace`. act_runner manages per-run subdirectory cleanup. Orphaned directories from interrupted runs should be cleaned up manually if disk space becomes a concern.
|
||||
- Workflows that previously relied on `OBS_CONFIG_DIR` env var or the `obs-configs` stable path on the host no longer need those. Both were removed in this PR.
|
||||
- This pattern does **not** apply to the `nsenter`-based Caddy reload step (ADR-012), which manages a host systemd service — a different problem class with no bind-mount equivalent.
|
||||
|
||||
## References
|
||||
|
||||
- ADR-011 — single-tenant runner trust model
|
||||
- ADR-012 — nsenter via privileged container for host service management
|
||||
- Issue #598 — original observability stack bind-mount failure
|
||||
- `runner-config.yaml` — `workdir_parent`, `valid_volumes`, `options`
|
||||
57
docs/adr/016-obs-stack-co-location-ci-push.md
Normal file
57
docs/adr/016-obs-stack-co-location-ci-push.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# ADR-016: Observability stack co-location at `/opt/familienarchiv/` with CI-push config sync
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
Issue #601 established that the observability stack must survive Gitea CI workspace wipes between nightly runs. When the nightly job completes, act_runner deletes the job workspace. Any Docker container that bind-mounts a config file from a workspace path (`/srv/gitea-workspace/…/infra/observability/prometheus/prometheus.yml`) then references a path that no longer exists on the host. On the next nightly run, Docker Compose either auto-creates an empty directory in its place (causing the container to fail to start because a file mount receives a directory) or finds a stale file from a previous run if the workspace happened to land at the same path.
|
||||
|
||||
ADR-015 solved the workspace bind-mount resolution problem: job workspaces are stored at `/srv/gitea-workspace` so `$(pwd)` inside the job container maps to a real host path. But it did not address persistence: the workspace is still wiped after the job, so bind mounts from workspace-relative paths remain fragile across runs.
|
||||
|
||||
### Decision drivers
|
||||
|
||||
1. Bind-mount sources must point to a host path that persists indefinitely, not to a path that disappears after each CI run.
|
||||
2. Config files must reflect the committed state of the repo after every nightly run (no manual sync steps).
|
||||
3. Secrets must not be written to the workspace or to any path managed by CI; they must survive independently of deployments.
|
||||
4. The solution must not introduce new infrastructure dependencies (no SSH access from CI, no external registry, no additional server-side daemon).
|
||||
|
||||
### Alternatives considered
|
||||
|
||||
**A: Server-pull model** — a systemd timer or cron job on the server does `git pull` from the repo into `/opt/familienarchiv/` and then runs `docker compose up`. Rejected because: (1) requires git credentials on the server and a registered deploy key, (2) adds a second deployment mechanism that diverges from the CI-push model used for the main app stack, (3) timing coupling — the server pull must complete before CI's health checks run, requiring polling or a webhook.
|
||||
|
||||
**B: Separate directory (e.g. `/opt/obs/`)** — keeps obs configs isolated from the app stack. Rejected because: (1) the main app compose files are already in `/opt/familienarchiv/` (managed the same way), and (2) GlitchTip shares the `archive-db` PostgreSQL instance and `archiv-net` Docker network — it is architecturally part of the same deployment unit, not a separate one. Co-location reflects the actual coupling.
|
||||
|
||||
**C: Named Docker configs (Swarm)** — Docker Swarm supports first-class config objects that persist in the cluster. Rejected because the project does not use Swarm and introducing it solely for config persistence is a disproportionate dependency.
|
||||
|
||||
## Decision
|
||||
|
||||
The observability stack is co-located with the main application deployment at `/opt/familienarchiv/`:
|
||||
|
||||
- `docker-compose.observability.yml` → `/opt/familienarchiv/docker-compose.observability.yml`
|
||||
- `infra/observability/` → `/opt/familienarchiv/infra/observability/`
|
||||
|
||||
Both the nightly CI job (`nightly.yml`) and the release job (`release.yml`) copy these files from the workspace checkout to `/opt/familienarchiv/` using `cp -r` on every run (CI-push model). Containers always read config from the permanent location; a workspace wipe has no effect on running containers.
|
||||
|
||||
Environment variables follow a two-source model:
|
||||
|
||||
- `infra/observability/obs.env` (git-tracked, non-secret): all non-sensitive config — host ports, public URLs (`GLITCHTIP_DOMAIN`, `GF_SERVER_ROOT_URL`), and the default `POSTGRES_HOST`. Changes go through PR review. No credentials.
|
||||
- `/opt/familienarchiv/obs-secrets.env` (CI-written, per-deploy): passwords and secret keys only (`GRAFANA_ADMIN_PASSWORD`, `GLITCHTIP_SECRET_KEY`, `POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_HOST`), injected fresh from Gitea secrets on every nightly and release deploy. Gitea is the single source of truth for secrets — rotating a secret takes effect on the next deploy without manual server action.
|
||||
|
||||
Both files are passed explicitly via `--env-file` to every obs compose command (config dry-run and `up`). There is no implicit auto-read `.env`. The required key inventory is documented in `docs/DEPLOYMENT.md §4`.
|
||||
|
||||
The CI runner mounts `/opt/familienarchiv` as a bind mount into job containers (see `runner-config.yaml`). This requires a one-time `mkdir -p /opt/familienarchiv/infra` on the server and a runner restart after updating `runner-config.yaml` (see ADR-015 and `docs/DEPLOYMENT.md §3.1`).
|
||||
|
||||
## Consequences
|
||||
|
||||
**Positive:**
|
||||
- Bind-mount sources survive workspace wipes by definition — they are on a persistent host path.
|
||||
- Config is always in sync with the repo after each nightly run.
|
||||
- No new infrastructure dependencies; the CI-push model mirrors how the main app stack is deployed.
|
||||
- Secret rotation requires no manual server action — Gitea secrets are the authoritative store; `obs-secrets.env` is rewritten from scratch on every deploy so a secret change takes effect on the next nightly or release run.
|
||||
|
||||
**Negative:**
|
||||
- `cp -r` does not remove deleted files; a config file removed from the repo persists in `/opt/familienarchiv/infra/observability/` until manually deleted. Acceptable for this project's change frequency. A `rsync -a --delete` would give a clean mirror if this becomes a problem.
|
||||
- Mounting `/opt/familienarchiv/` into CI job containers expands the blast radius of a compromised workflow step — a malicious step could overwrite app compose files and Caddy config. Acceptable because the runner is single-tenant (trusted code only). See `runner-config.yaml` security comment.
|
||||
- Runner must be restarted (`systemctl restart gitea-runner`) after any change to `runner-config.yaml` for the new mount to take effect.
|
||||
48
docs/adr/017-management-port-security.md
Normal file
48
docs/adr/017-management-port-security.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# ADR-017: Spring Boot 4.0 management port shares the main security filter chain
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
The Familienarchiv backend runs Spring Boot Actuator on a dedicated management port (8081) so that Caddy never proxies `/actuator/*` requests and Prometheus can reach the scrape endpoint directly inside `archiv-net`.
|
||||
|
||||
In earlier Spring Boot versions (< 4.0), the management server ran in an isolated child application context whose security was governed independently by `ManagementWebSecurityAutoConfiguration`. The main app's `SecurityConfig` filter chain (port 8080) never intercepted requests arriving on port 8081.
|
||||
|
||||
In Spring Boot 4.0 with Jetty, this isolation was removed. The management server now traverses the **same** Spring Security `FilterChainProxy` as the main application. Concretely:
|
||||
|
||||
- Any `SecurityFilterChain` bean in the application context is evaluated for requests arriving on the management port.
|
||||
- There is no longer a separate "management security" child context.
|
||||
|
||||
This was discovered when Prometheus began receiving HTTP 401 responses from `/actuator/prometheus` despite the endpoint being exposed and the `micrometer-registry-prometheus` dependency being present. Prometheus rejected these responses with `received unsupported Content-Type "text/html"` because the main filter chain's form-login `DelegatingAuthenticationEntryPoint` was redirecting unauthenticated requests to `/login` (302 → HTML).
|
||||
|
||||
A secondary issue: Spring Boot 4.0 no longer auto-enables Prometheus metrics export — `management.prometheus.metrics.export.enabled` must be set explicitly, and the Prometheus scrape endpoint requires `spring-boot-starter-micrometer-metrics` (a new starter that was split out in Spring Boot 4.0).
|
||||
|
||||
## Decision
|
||||
|
||||
1. **Dedicated management `SecurityFilterChain`** scoped to `/actuator/**` at `@Order(1)` (highest precedence). This chain:
|
||||
- `permitAll()` for `/actuator/health` and `/actuator/prometheus` — required for Docker health checks and unauthenticated Prometheus scraping.
|
||||
- `authenticated()` for all other actuator endpoints — blocks `/actuator/metrics`, `/actuator/info`, etc. without credentials.
|
||||
- Uses an explicit `401` entry point (not form-login redirect) so that API clients — including Prometheus — receive a machine-readable status code rather than an HTML redirect.
|
||||
- No CSRF, no form login.
|
||||
|
||||
2. **Belt-and-suspenders `permitAll()` in the main `SecurityFilterChain`** for `/actuator/health` and `/actuator/prometheus`, in case a future configuration change causes these paths to escape the management chain's `securityMatcher`.
|
||||
|
||||
3. **Network isolation as the outer defense boundary.** Port 8081 is not published in `docker-compose.yml` and is not routed through Caddy. Only services inside `archiv-net` (primarily Prometheus and the Docker health checker) can reach the management port.
|
||||
|
||||
## Alternatives rejected
|
||||
|
||||
- **Exclude `ManagementWebSecurityAutoConfiguration`:** This auto-configuration no longer exists in Spring Boot 4.0. Exclusion is not applicable.
|
||||
- **Keep `SecurityConfig` as the sole filter chain without `@Order(1)` management chain:** The main chain's form-login `DelegatingAuthenticationEntryPoint` redirects browser-like clients to `/login` (302). Prometheus and automated health check clients cannot follow this redirect, so the endpoint would be unreachable without a dedicated chain that returns plain 401 or 200.
|
||||
- **Per-endpoint `@Order(1)` filter chain using `EndpointRequest.toAnyEndpoint()`:** The `spring-boot-security` artifact that provides `EndpointRequest` is not a transitive dependency of `spring-boot-starter-actuator` in Spring Boot 4.0. Using a path-based `securityMatcher("/actuator/**")` achieves the same scoping without an extra dependency.
|
||||
|
||||
## Consequences
|
||||
|
||||
- All actuator endpoints on port 8081 that are not explicitly `permitAll()`-ed require HTTP Basic credentials. Without valid credentials, the response is 401 (not a redirect).
|
||||
- Adding a new actuator endpoint to `management.endpoints.web.exposure.include` implicitly protects it via `anyRequest().authenticated()` in the management chain — no additional `permitAll()` needed unless intentional.
|
||||
- A regression test (`ActuatorPrometheusIT`) verifies:
|
||||
- `/actuator/prometheus` returns 200 without credentials.
|
||||
- `/actuator/metrics` returns 401 without credentials.
|
||||
- Prometheus metric names are present in the response body.
|
||||
- If port 8081 is ever accidentally published in `docker-compose.yml`, actuator endpoints other than health and prometheus are still protected by HTTP Basic. This reduces (but does not eliminate) the risk of inadvertent exposure.
|
||||
86
docs/adr/018-glitchtip-frontend-error-tracking.md
Normal file
86
docs/adr/018-glitchtip-frontend-error-tracking.md
Normal file
@@ -0,0 +1,86 @@
|
||||
# ADR-018: GlitchTip frontend error tracking via @sentry/sveltekit
|
||||
|
||||
**Date:** 2026-05-17
|
||||
**Status:** Accepted
|
||||
**Deciders:** Marcel Raddatz
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
The Familienarchiv had no client-side error reporting. When a user encountered a crash
|
||||
or unhandled error in the SvelteKit frontend, there was no way for the operator to
|
||||
observe it — errors were invisible until a user manually reported them. A GlitchTip
|
||||
instance (self-hosted, Sentry-compatible) was already running as part of the
|
||||
observability stack (`docker-compose.observability.yml`). The backend already reported
|
||||
server-side errors to it.
|
||||
|
||||
We needed a way to:
|
||||
1. Capture frontend errors automatically and route them to GlitchTip.
|
||||
2. Give users a visible error identifier they can include in a support message.
|
||||
3. Do this without leaking personally identifiable information (PII) from the family
|
||||
archive — documents contain personal histories, names, and relationships.
|
||||
|
||||
---
|
||||
|
||||
## Decision
|
||||
|
||||
Use `@sentry/sveltekit` (the official Sentry SDK for SvelteKit) to:
|
||||
|
||||
- Initialise with `sendDefaultPii: false` on both `hooks.server.ts` and `hooks.client.ts`.
|
||||
- Pass a callback to `Sentry.handleErrorWithSentry()` that returns
|
||||
`{ message, errorId }` where `errorId` is `Sentry.lastEventId()` when Sentry
|
||||
captured the event, or a fresh `crypto.randomUUID()` as fallback.
|
||||
- Display the `errorId` on the `+error.svelte` page so users can include it in a
|
||||
report to the operator.
|
||||
|
||||
The SDK is initialised with `enabled: !!import.meta.env.VITE_SENTRY_DSN` so that
|
||||
development and CI builds without a DSN configured do not send any events.
|
||||
|
||||
`VITE_SENTRY_DSN` is a write-only ingest key — it can POST events to GlitchTip but
|
||||
cannot read them. It is safe to include in the client bundle per the Sentry security
|
||||
model; it does not require rotation like a password.
|
||||
|
||||
---
|
||||
|
||||
## Alternatives considered
|
||||
|
||||
**Sentry SaaS** — rejected. The archive contains private family documents and personal
|
||||
history. Sending error events with stack traces to a US-hosted third party is
|
||||
inconsistent with the project's data-minimisation posture. Self-hosted GlitchTip on
|
||||
the same Hetzner VPS keeps all data on infrastructure the operator controls.
|
||||
|
||||
**Custom error logging endpoint** — rejected. The @sentry/sveltekit SDK handles
|
||||
SvelteKit's hook lifecycle, source-map upload, and event grouping automatically.
|
||||
Reimplementing this would cost significant engineering time for no benefit.
|
||||
|
||||
**Log-only (no user-visible errorId)** — rejected. Without a visible error ID, users
|
||||
can only describe what happened in natural language, making it hard to correlate a
|
||||
report with a specific GlitchTip event. The `errorId` closes this gap at negligible UI
|
||||
cost.
|
||||
|
||||
---
|
||||
|
||||
## Consequences
|
||||
|
||||
**Positive:**
|
||||
- Frontend errors are now observable without requiring user reports.
|
||||
- Users can provide an `errorId` that maps directly to a GlitchTip event.
|
||||
- `sendDefaultPii: false` ensures names, IPs, and cookie values are not included in
|
||||
captured events.
|
||||
- `tracesSampleRate: 0.1` limits trace volume to 10% of transactions, keeping
|
||||
GlitchTip load low on the shared VPS.
|
||||
|
||||
**Negative / trade-offs:**
|
||||
- The `@sentry/sveltekit` SDK is now a production dependency. SDK updates must be
|
||||
reviewed for changes to the default PII scrubbing behaviour.
|
||||
- The `handleError` callback in both hooks returns a hardcoded English message
|
||||
(`'An unexpected error occurred'`). This bypasses Paraglide i18n — the error page
|
||||
will always show English text when the hooks are active, regardless of the user's
|
||||
locale. This is acceptable because: (a) the error page is a last-resort fallback
|
||||
not part of normal UX, (b) the `errorId` is the actionable information, not the
|
||||
message text. A future ADR may address this if internationalised error messages
|
||||
become a requirement.
|
||||
- `Sentry.lastEventId()` returns `undefined` when Sentry did not capture the event
|
||||
(e.g. DSN not configured). The `crypto.randomUUID()` fallback guarantees an `errorId`
|
||||
is always present, but that UUID will not appear in GlitchTip.
|
||||
94
docs/adr/019-container-hardening-baseline.md
Normal file
94
docs/adr/019-container-hardening-baseline.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# ADR-019 — Container hardening baseline: non-root user + read-only filesystem
|
||||
|
||||
**Status:** Accepted
|
||||
**Date:** 2026-05-17
|
||||
**PR:** #611
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
The OCR service ran as `root` inside its container by default. This violated CIS Docker Benchmark §4.1 and CIS §4.6, and meant that any exploit in the OCR pipeline (untrusted PDF content, model deserialization, ZIP handling) could write to or execute anything inside the container without restriction.
|
||||
|
||||
The following risks were present before this baseline:
|
||||
|
||||
- A path-traversal in the ZIP-based training endpoint could overwrite arbitrary paths on the container filesystem (including Python source files and model files).
|
||||
- A compromised dependency running at startup could persist itself to the image layers or model volumes.
|
||||
- Misconfigured model downloads could overwrite `/etc/passwd` or similar via path-traversal — possible because root can write everywhere.
|
||||
|
||||
---
|
||||
|
||||
## Decision
|
||||
|
||||
All containers in this project that have no operational need for elevated privileges **must** apply the following hardening baseline:
|
||||
|
||||
### 1. Non-root user
|
||||
|
||||
Create a dedicated user with a fixed UID and no login shell:
|
||||
|
||||
```dockerfile
|
||||
RUN useradd --no-create-home --shell /usr/sbin/nologin --uid 1000 <service>
|
||||
```
|
||||
|
||||
Set `HOME` explicitly to a path owned by this user. Do not rely on `~` expansion for any path resolution in application code.
|
||||
|
||||
### 2. Read-only container filesystem
|
||||
|
||||
```yaml
|
||||
read_only: true
|
||||
```
|
||||
|
||||
All paths the application writes to at runtime must be explicitly declared as either a named volume or a `tmpfs` mount. This turns any unexpected write attempt into an immediate, visible `PermissionError` rather than a silent success.
|
||||
|
||||
### 3. Per-path write carve-outs
|
||||
|
||||
Declare only the paths that are actually written at runtime:
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
- <service>_models:/app/models # persistent model storage
|
||||
- <service>_cache:/app/cache # HuggingFace / ketos download cache
|
||||
tmpfs:
|
||||
- /tmp:size=512m # transient scratch space (ZIP extraction etc.)
|
||||
```
|
||||
|
||||
Do not mount the home directory as a volume unless necessary — use `XDG_CACHE_HOME` and `TORCH_HOME` env vars to redirect library cache writes to the declared writable paths instead.
|
||||
|
||||
### 4. Dropped capabilities and privilege escalation prevention
|
||||
|
||||
```yaml
|
||||
cap_drop: [ALL]
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
```
|
||||
|
||||
A Python/FastAPI service on port 8000+ requires no Linux capabilities. Dropping all and blocking privilege escalation via setuid prevents any capability regain even if a dependency contains a SUID binary.
|
||||
|
||||
### 5. Startup root canary
|
||||
|
||||
Log a warning during startup if the process is running as root. This catches misconfiguration (e.g., `USER` directive accidentally removed in a future Dockerfile edit) before it becomes a silent vulnerability:
|
||||
|
||||
```python
|
||||
if os.getuid() == 0:
|
||||
logger.warning("Running as root — CIS Docker §4.1 violation")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Consequences
|
||||
|
||||
**Positive:**
|
||||
- Any exploit that achieves code execution inside the container is confined: it cannot write outside the declared volumes, cannot acquire new capabilities, and cannot persist to the image filesystem.
|
||||
- `PermissionError` on startup is an explicit, diagnosable failure rather than a silent privilege misuse.
|
||||
- The startup canary catches accidental regressions in the non-root setup.
|
||||
|
||||
**Negative / operational cost:**
|
||||
- Every new feature that writes to a new path (e.g., a new model cache directory, a new scratch path) must add a volume or tmpfs mount. The `read_only: true` flag makes this a hard constraint, not a suggestion.
|
||||
- Library dependencies that write to `HOME` without respecting `XDG_CACHE_HOME` must be identified and redirected explicitly (see `TORCH_HOME`, `XDG_CACHE_HOME`, `HF_HOME` in `docker-compose.yml`).
|
||||
- Existing named volumes written by root (pre-baseline) must be dropped and recreated before upgrading. See [DEPLOYMENT.md §8](../DEPLOYMENT.md#8-upgrade-notes).
|
||||
|
||||
---
|
||||
|
||||
## Applicability
|
||||
|
||||
This baseline applies to the OCR service (PR #611). It should be applied to any new container added to the project unless there is a documented, specific operational reason a capability or writable filesystem is required.
|
||||
94
docs/adr/020-stateful-auth-via-spring-session-jdbc.md
Normal file
94
docs/adr/020-stateful-auth-via-spring-session-jdbc.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# ADR-020 — Stateful Authentication via Spring Session JDBC
|
||||
|
||||
**Date:** 2026-05-17
|
||||
**Status:** Accepted
|
||||
**Issue:** #523
|
||||
|
||||
---
|
||||
|
||||
## Context
|
||||
|
||||
PR #521 (closing #520) introduced `AuthTokenCookieFilter` to unblock a production deploy.
|
||||
The filter promotes an `auth_token` cookie — which contains the full HTTP Basic credential
|
||||
(`Basic <base64(email:password)>`) — to an `Authorization` header so browser-direct `/api/*`
|
||||
calls authenticate correctly behind Caddy.
|
||||
|
||||
This model has three concrete problems:
|
||||
|
||||
1. **Cookie = credential.** A stolen `auth_token` cookie leaks the user's password in
|
||||
base64-encoded plaintext. No decode step is needed; the cookie value is directly usable
|
||||
as a credential forever.
|
||||
2. **No server-side revocation.** Logout deletes the local cookie but the credential
|
||||
remains valid until the 24 h `Max-Age` elapses. An attacker who copied the cookie before
|
||||
logout retains access.
|
||||
3. **No audit signal.** There is no server-side record of login or logout events. Observability
|
||||
and compliance tooling cannot reconstruct "who was logged in when".
|
||||
|
||||
Additionally, Nora flagged that `url.protocol === 'https:'` in `login/+page.server.ts` is
|
||||
incorrect behind Caddy: SvelteKit sees `http`, so `Secure=false` was set on the credential
|
||||
cookie in production, transmitting it in cleartext from Caddy to the browser on any network
|
||||
path without TLS.
|
||||
|
||||
---
|
||||
|
||||
## Decision
|
||||
|
||||
Replace the `auth_token` / `AuthTokenCookieFilter` model with **Spring Session JDBC**:
|
||||
|
||||
- A `POST /api/auth/login` endpoint in a new `auth` package authenticates with `email +
|
||||
password`, creates a server-side session record in PostgreSQL, and returns the `AppUser`
|
||||
JSON in the response body.
|
||||
- The response sets an **opaque** `fa_session` cookie (`HttpOnly`, `SameSite=Strict`,
|
||||
`Secure` in non-dev profiles, `Max-Age=28800` — 8 h idle timeout) that contains only the
|
||||
session ID, never a credential.
|
||||
- A `POST /api/auth/logout` endpoint invalidates the session record immediately. Subsequent
|
||||
requests carrying the same cookie return 401.
|
||||
- `AuthTokenCookieFilter` is deleted in the same PR. No transitional coexistence period.
|
||||
- Cookie name `fa_session` (not the default `SESSION`) minimises framework fingerprinting.
|
||||
|
||||
Session storage uses the canonical `spring_session` / `spring_session_attributes` tables,
|
||||
re-introduced via `V67__recreate_spring_session_tables.sql` (dropped by V2 when the
|
||||
dependency was previously removed as unused).
|
||||
|
||||
**Idle timeout:** 8 h (`MaxInactiveIntervalInSeconds = 28800`). No 24 h absolute cap is
|
||||
implemented in Phase 1 — the 8 h idle bound contains the risk to one workday. A weekend-long
|
||||
active session is acceptable given the family-archive threat model. The absolute cap and
|
||||
additional revocation paths (password-change, admin force-logout) land in Phase 2 (#524).
|
||||
|
||||
---
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### Stay on Basic cookie + add a server-side revocation table
|
||||
|
||||
Keeps the credential-in-cookie problem. Implementing a revocation table would re-invent
|
||||
Spring Session badly — we'd write bespoke session storage that already exists and is
|
||||
well-tested upstream.
|
||||
|
||||
### JWT (stateless)
|
||||
|
||||
Opaque revocation is simpler than JWT revocation (token introspection or short-lived tokens
|
||||
+ refresh). The cluster is single-node; session affinity is not a constraint. Stateless tokens
|
||||
buy complexity without benefit here. JWKS infrastructure and refresh-token rotation are
|
||||
unnecessary for a family archive with < 50 concurrent users.
|
||||
|
||||
### Keep `auth_token` cookie but add `AuthTokenCookieFilter` improvements
|
||||
|
||||
The root problem is that the cookie contains the credential. No amount of filter hardening
|
||||
fixes that. Nora's P1 flag stands until the credential leaves the cookie.
|
||||
|
||||
---
|
||||
|
||||
## Consequences
|
||||
|
||||
- **One breaking deploy.** All existing sessions (the `auth_token` cookies) become inert
|
||||
on the next request after the deploy. The SvelteKit `handleAuth` hook redirects to
|
||||
`/login?reason=expired`; a banner renders. Users re-login. No data loss.
|
||||
- **~2 KB per active session** in PostgreSQL (`spring_session_attributes` stores the
|
||||
serialised `SecurityContext`). With < 50 family members, this is immaterial.
|
||||
- **Session cleanup task** runs on the default Spring Session JDBC schedule (every 10 min).
|
||||
No custom job needed.
|
||||
- **Caddy / infrastructure unchanged.** `forward-headers-strategy: native` already ensures
|
||||
`Secure` cookies work correctly behind the reverse proxy.
|
||||
- **Dev profile:** `application-dev.yaml` sets `secure: false` on the session cookie so
|
||||
local HTTP dev (port 5173 → 8080) works without TLS.
|
||||
@@ -8,9 +8,11 @@ Person(member, "Family Member", "Access by administrator invite. Searches, brows
|
||||
|
||||
System(familienarchiv, "Familienarchiv", "Web application for digitising, organising, and searching family documents")
|
||||
System_Ext(mail, "Email Service", "SMTP server. Delivers notification emails (mentions, replies) and password-reset links.")
|
||||
System_Ext(glitchtip, "GlitchTip", "Self-hosted error tracking (Sentry-compatible). Receives frontend and backend error events with stack traces.")
|
||||
|
||||
Rel(admin, familienarchiv, "Manages via browser", "HTTPS")
|
||||
Rel(member, familienarchiv, "Searches, reads, and transcribes via browser", "HTTPS")
|
||||
Rel(familienarchiv, mail, "Sends notification and password-reset emails (optional)", "SMTP")
|
||||
Rel(familienarchiv, glitchtip, "Sends error events with errorId and stack trace", "HTTPS")
|
||||
|
||||
@enduml
|
||||
|
||||
@@ -17,6 +17,19 @@ System_Boundary(archiv, "Familienarchiv (Docker Compose)") {
|
||||
Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.")
|
||||
}
|
||||
|
||||
System_Boundary(observability, "Observability Stack (/opt/familienarchiv/docker-compose.observability.yml)") {
|
||||
Container(prometheus, "Prometheus", "prom/prometheus:v3.4.0", "Scrapes metrics from backend management port 8081 (/actuator/prometheus), node-exporter, and cAdvisor. Retention: 30 days.")
|
||||
Container(node_exporter, "Node Exporter", "prom/node-exporter:v1.9.0", "Host-level CPU, memory, disk, and network metrics.")
|
||||
Container(cadvisor, "cAdvisor", "gcr.io/cadvisor/cadvisor:v0.52.1", "Per-container resource metrics.")
|
||||
Container(loki, "Loki", "grafana/loki:3.4.2", "Stores log streams from all containers.")
|
||||
Container(promtail, "Promtail", "grafana/promtail:3.4.2", "Ships Docker container logs to Loki via Docker SD.")
|
||||
Container(tempo, "Tempo", "grafana/tempo:2.7.2", "Distributed trace storage. OTLP HTTP receiver on port 4318 (archiv-net). Grafana queries traces on port 3200 (obs-net). All ports internal only.")
|
||||
Container(grafana, "Grafana", "grafana/grafana-oss:11.6.1", "Unified observability UI — dashboards, logs, traces. Datasources (Prometheus, Loki, Tempo) and three dashboards are auto-provisioned.")
|
||||
Container(glitchtip, "GlitchTip", "glitchtip/glitchtip:6.1.6", "Sentry-compatible error tracker — web process. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces.")
|
||||
Container(obs_glitchtip_worker, "GlitchTip Worker", "glitchtip/glitchtip:6.1.6", "Celery + beat worker — async event ingestion, notifications, cleanup.")
|
||||
Container(obs_redis, "Redis", "redis:7-alpine", "Celery task queue for GlitchTip async workers.")
|
||||
}
|
||||
|
||||
Rel(user, caddy, "HTTPS", "TLS 1.2/1.3")
|
||||
Rel(caddy, frontend, "Reverse proxies non-/api requests", "HTTP / loopback:3000")
|
||||
Rel(caddy, backend, "Reverse proxies /api/*", "HTTP / loopback:8080")
|
||||
@@ -28,5 +41,12 @@ Rel(backend, ocr, "OCR job requests with presigned MinIO URL", "HTTP / REST / JS
|
||||
Rel(backend, mail, "Sends notification and password-reset emails (optional)", "SMTP")
|
||||
Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
|
||||
Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI")
|
||||
Rel(promtail, loki, "Pushes log streams", "HTTP/Loki push API")
|
||||
Rel(backend, tempo, "Sends distributed traces via OTLP", "HTTP / OTLP / port 4318 (archiv-net)")
|
||||
Rel(grafana, prometheus, "Queries metrics", "HTTP 9090")
|
||||
Rel(grafana, loki, "Queries logs", "HTTP 3100")
|
||||
Rel(grafana, tempo, "Queries traces", "HTTP 3200")
|
||||
Rel(glitchtip, db, "Stores error events in glitchtip DB", "PostgreSQL / archiv-net")
|
||||
Rel(obs_glitchtip_worker, obs_redis, "Processes Celery tasks", "Redis / obs-net")
|
||||
|
||||
@enduml
|
||||
|
||||
@@ -1,15 +1,21 @@
|
||||
@startuml
|
||||
title Authentication Flow (behind Caddy reverse proxy)
|
||||
title Authentication Flow (Spring Session JDBC, behind Caddy reverse proxy)
|
||||
note over Browser, DB
|
||||
Phase 1 of the auth rewrite (ADR-020 / #523).
|
||||
Replaces the Basic-credentials-in-cookie model
|
||||
with an opaque server-side session id (fa_session).
|
||||
end note
|
||||
|
||||
actor User
|
||||
participant Browser
|
||||
participant "Caddy (TLS termination)" as Caddy
|
||||
participant "Frontend (SvelteKit)" as Frontend
|
||||
participant "Backend (Spring Boot)" as Backend
|
||||
participant PostgreSQL as DB
|
||||
participant "spring_session\n(PostgreSQL)" as DB
|
||||
|
||||
== Login ==
|
||||
User -> Browser: Enter email + password
|
||||
Browser -> Caddy: HTTPS POST /login (form action)
|
||||
Browser -> Caddy: HTTPS POST /?/login (form action)
|
||||
note right of Caddy
|
||||
Caddy terminates TLS and forwards
|
||||
to Frontend over HTTP with:
|
||||
@@ -17,33 +23,54 @@ note right of Caddy
|
||||
X-Forwarded-For: <client IP>
|
||||
X-Forwarded-Host: archiv.raddatz.cloud
|
||||
end note
|
||||
Caddy -> Frontend: HTTP POST /login\n+ X-Forwarded-Proto: https
|
||||
Frontend -> Frontend: Base64 encode "email:password"
|
||||
Frontend -> Backend: GET /api/users/me\nAuthorization: Basic <token>\n+ X-Forwarded-Proto: https
|
||||
Caddy -> Frontend: HTTP POST /?/login + X-Forwarded-Proto: https
|
||||
Frontend -> Backend: POST /api/auth/login\n{email, password}\n+ X-Forwarded-Proto: https
|
||||
note right of Backend
|
||||
server.forward-headers-strategy: native
|
||||
Jetty's ForwardedRequestCustomizer
|
||||
reads X-Forwarded-Proto so
|
||||
request.getScheme() returns "https".
|
||||
→ request.getScheme() = "https"
|
||||
→ Secure cookie flag set automatically.
|
||||
end note
|
||||
Backend -> Backend: Spring Security parses Basic Auth
|
||||
Backend -> Backend: AuthenticationManager\nauthenticate(email, password)
|
||||
Backend -> DB: SELECT user WHERE email=?
|
||||
DB --> Backend: AppUser + groups + permissions
|
||||
Backend -> Backend: BCrypt.matches(password, hash)
|
||||
Backend --> Frontend: 200 OK — UserDTO
|
||||
Frontend -> Caddy: Set-Cookie: auth_token=<base64>\n(httpOnly, **Secure**, SameSite=strict, maxAge=86400)
|
||||
note right of Frontend
|
||||
Secure flag is set because the
|
||||
request scheme observed by the
|
||||
app is https (forwarded by Caddy).
|
||||
end note
|
||||
Caddy -> Browser: HTTPS 200 + Set-Cookie
|
||||
Browser -> Caddy: HTTPS GET / (next request)
|
||||
Caddy -> Frontend: HTTP GET / + X-Forwarded-Proto: https
|
||||
Frontend -> Frontend: hooks.server.ts reads auth_token cookie
|
||||
Frontend -> Backend: GET /api/users/me\nAuthorization: Basic <token>
|
||||
Backend --> Frontend: 200 OK — user in event.locals
|
||||
Frontend --> Caddy: rendered page
|
||||
Caddy --> Browser: HTTPS 200
|
||||
Backend -> Backend: BCrypt.matches(password, hash)\n(timing-safe: dummy hash on miss)
|
||||
Backend -> Backend: getSession(true).setAttribute(\n SPRING_SECURITY_CONTEXT, ctx)
|
||||
Backend -> DB: INSERT spring_session\n+ spring_session_attributes
|
||||
Backend -> Backend: AuditService.log(LOGIN_SUCCESS,\n {userId, ip, ua})
|
||||
Backend --> Frontend: 200 OK — AppUser\nSet-Cookie: fa_session=<opaque>;\n Path=/; HttpOnly; SameSite=Strict; Secure
|
||||
Frontend -> Frontend: Parse Set-Cookie, re-emit fa_session\n(matches backend attrs)
|
||||
Frontend --> Caddy: 303 → /\nSet-Cookie: fa_session=<opaque>
|
||||
Caddy --> Browser: HTTPS 303 + Set-Cookie
|
||||
|
||||
== Authenticated request ==
|
||||
Browser -> Caddy: HTTPS GET /\nCookie: fa_session=<opaque>
|
||||
Caddy -> Frontend: HTTP GET / + Cookie + X-Forwarded-Proto: https
|
||||
Frontend -> Frontend: hooks.server.ts reads fa_session
|
||||
Frontend -> Backend: GET /api/users/me\nCookie: fa_session=<opaque>
|
||||
Backend -> DB: SELECT * FROM spring_session\nWHERE SESSION_ID = ?
|
||||
DB --> Backend: row (or null if expired)
|
||||
alt Session valid
|
||||
Backend -> DB: UPDATE spring_session\nSET LAST_ACCESS_TIME = now
|
||||
Backend --> Frontend: 200 OK — AppUser
|
||||
Frontend --> Caddy: rendered page
|
||||
Caddy --> Browser: HTTPS 200
|
||||
else Session expired (idle > 8h) or unknown
|
||||
Backend --> Frontend: 401 Unauthorized
|
||||
Frontend -> Frontend: hooks: delete fa_session cookie
|
||||
Frontend --> Caddy: 302 → /login?reason=expired
|
||||
Caddy --> Browser: HTTPS 302
|
||||
end
|
||||
|
||||
== Logout ==
|
||||
Browser -> Caddy: HTTPS POST /logout
|
||||
Caddy -> Frontend: HTTP POST /logout\nCookie: fa_session=<opaque>
|
||||
Frontend -> Backend: POST /api/auth/logout\nCookie: fa_session=<opaque>
|
||||
Backend -> Backend: session.invalidate()\nSecurityContextHolder.clearContext()
|
||||
Backend -> DB: DELETE FROM spring_session\nWHERE SESSION_ID = ?
|
||||
Backend -> Backend: AuditService.log(LOGOUT,\n {userId, ip, ua})
|
||||
Backend --> Frontend: 204 No Content
|
||||
Frontend -> Frontend: cookies.delete('fa_session')
|
||||
Frontend --> Caddy: 303 → /login
|
||||
Caddy --> Browser: HTTPS 303 (cookie cleared)
|
||||
|
||||
@enduml
|
||||
|
||||
@@ -19,6 +19,39 @@ Both containers live in the `gitea_gitea` Docker network on the VPS. The runner
|
||||
|
||||
The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps.
|
||||
|
||||
### Workspace bind-mount setup (DooD path resolution)
|
||||
|
||||
When a workflow step calls `docker compose up` with relative bind-mount sources (e.g. `./infra/observability/prometheus/prometheus.yml`), Compose resolves them against `$(pwd)` inside the job container and passes the resulting **absolute path** to the host Docker daemon. The host daemon then tries to bind-mount that path from the **host filesystem**.
|
||||
|
||||
In the default DooD setup the job container's workspace lives in the act_runner overlay2 layer — the host has no directory at that path, auto-creates an empty one, and the container fails with:
|
||||
|
||||
```
|
||||
error mounting "…/prometheus/prometheus.yml" to rootfs at "/etc/prometheus/prometheus.yml": not a directory
|
||||
```
|
||||
|
||||
**Solution (ADR-015):** store job workspaces on a real host path and mount it at the **same absolute path** inside the runner and every job container. `runner-config.yaml` configures this via `workdir_parent`, `valid_volumes`, and `options`.
|
||||
|
||||
**One-time host setup** (required on any fresh VPS):
|
||||
|
||||
```bash
|
||||
mkdir -p /srv/gitea-workspace
|
||||
# Then add to the runner service in ~/docker/gitea/compose.yaml:
|
||||
# volumes:
|
||||
# - /srv/gitea-workspace:/srv/gitea-workspace
|
||||
# Restart the runner container for the change to take effect.
|
||||
```
|
||||
|
||||
The path `/srv/gitea-workspace` is the canonical workspace root. It must be identical on the host and inside job containers — if the paths differ, Compose still resolves to the container-internal path, which the host daemon cannot find (the original bug).
|
||||
|
||||
**Disk management:** act_runner cleans per-run subdirectories on completion. Orphaned directories from interrupted runs accumulate under `/srv/gitea-workspace` and should be pruned manually if disk space becomes a concern:
|
||||
|
||||
```bash
|
||||
# List workspace directories older than 7 days
|
||||
find /srv/gitea-workspace -mindepth 3 -maxdepth 3 -type d -mtime +7
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Running host-level commands from CI (nsenter pattern)
|
||||
|
||||
Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace:
|
||||
@@ -108,6 +141,33 @@ nsenter: failed to execute /bin/systemctl: No such file or directory
|
||||
|
||||
The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step.
|
||||
|
||||
**Failure mode 4 — workspace bind-mount not configured (observability stack or any compose-with-file-mounts job)**
|
||||
|
||||
Symptom in CI log:
|
||||
```
|
||||
Error response from daemon: error while creating mount source path "…/prometheus/prometheus.yml": mkdir …: not a directory
|
||||
```
|
||||
|
||||
Or the service starts but immediately crashes because a config file was mounted as an empty directory.
|
||||
|
||||
Cause: `/srv/gitea-workspace` does not exist on the host, or the runner container's `compose.yaml` is missing the `- /srv/gitea-workspace:/srv/gitea-workspace` volume line.
|
||||
|
||||
Diagnosis:
|
||||
```bash
|
||||
ssh root@<vps>
|
||||
ls -la /srv/gitea-workspace # must exist and be a directory
|
||||
docker inspect gitea-runner | grep -A5 Mounts # must show /srv/gitea-workspace
|
||||
```
|
||||
|
||||
Recovery:
|
||||
```bash
|
||||
mkdir -p /srv/gitea-workspace
|
||||
# Add volume line to runner compose.yaml, then:
|
||||
docker compose -f ~/docker/gitea/compose.yaml up -d gitea-runner
|
||||
```
|
||||
|
||||
See `docs/DEPLOYMENT.md §3.1` and ADR-015 for the full setup rationale.
|
||||
|
||||
---
|
||||
|
||||
## Gitea vs GitHub Actions Differences
|
||||
@@ -200,7 +260,7 @@ jobs:
|
||||
working-directory: frontend
|
||||
- name: Upload screenshots
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4 # ← upgraded from v3
|
||||
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade.
|
||||
with:
|
||||
name: unit-test-screenshots
|
||||
path: frontend/test-results/screenshots/
|
||||
@@ -227,7 +287,7 @@ jobs:
|
||||
working-directory: backend
|
||||
- name: Upload test results
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4 # ← upgraded from v3
|
||||
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade.
|
||||
with:
|
||||
name: backend-test-results
|
||||
path: backend/target/surefire-reports/
|
||||
@@ -329,7 +389,7 @@ jobs:
|
||||
E2E_BACKEND_URL: http://localhost:8080
|
||||
- name: Upload E2E results
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4 # ← upgraded from v3
|
||||
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade.
|
||||
with:
|
||||
name: e2e-results
|
||||
path: frontend/test-results/e2e/
|
||||
|
||||
@@ -12,11 +12,11 @@ The original spec in this doc proposed an overlay pattern (`docker compose -f do
|
||||
|
||||
---
|
||||
|
||||
## Observability stack — not yet deployed
|
||||
## Observability stack
|
||||
|
||||
Prometheus, Loki, Grafana, Alertmanager, Uptime Kuma, GlitchTip and ntfy are **not** part of the production deployment that #497 landed. They are tracked as follow-up issue #498.
|
||||
The observability stack (Prometheus, Loki, Grafana, Tempo, GlitchTip) ships as a separate `docker-compose.observability.yml` alongside the main stack. Configuration lives under `infra/observability/`.
|
||||
|
||||
When that lands the observability containers will join `docker-compose.prod.yml` under a dedicated profile so they can be operated alongside the application stack without affecting the application containers' restart cycle.
|
||||
→ See [docs/DEPLOYMENT.md §4](../DEPLOYMENT.md#4-logs--observability) for the full setup procedure, service URLs, first-run steps, and env var reference.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -165,7 +165,7 @@ npm run check # svelte-check (type checking)
|
||||
|
||||
```bash
|
||||
npm run test # Vitest unit + server tests (headless)
|
||||
npm run test:coverage # Coverage report (server project only)
|
||||
npm run test:coverage # Coverage report (server + client)
|
||||
npm run test:e2e # Playwright E2E tests
|
||||
npm run test:e2e:headed # Playwright E2E with visible browser
|
||||
npm run test:e2e:ui # Playwright UI mode
|
||||
|
||||
@@ -29,6 +29,6 @@ ENV NODE_ENV=production
|
||||
COPY --from=build /app/build ./build
|
||||
COPY --from=build /app/package.json ./package.json
|
||||
COPY --from=build /app/package-lock.json ./package-lock.json
|
||||
RUN npm ci --omit=dev
|
||||
RUN npm ci --omit=dev --ignore-scripts
|
||||
EXPOSE 3000
|
||||
CMD ["node", "build"]
|
||||
|
||||
@@ -38,14 +38,16 @@ export default defineConfig(
|
||||
'no-undef': 'off',
|
||||
// This rule is designed for Svelte 5's own routing system using resolve().
|
||||
// In SvelteKit, <a href> and goto() from $app/navigation are the correct patterns — resolve() is not needed.
|
||||
'svelte/no-navigation-without-resolve': 'off'
|
||||
'svelte/no-navigation-without-resolve': 'off',
|
||||
// Prevents accidental console.log left in source. console.warn and console.error
|
||||
// are still permitted for intentional server-side logging (e.g. hooks.server.ts).
|
||||
'no-console': ['error', { allow: ['warn', 'error'] }]
|
||||
}
|
||||
},
|
||||
{
|
||||
files: ['**/*.svelte', '**/*.svelte.ts', '**/*.svelte.js'],
|
||||
languageOptions: {
|
||||
parserOptions: {
|
||||
projectService: true,
|
||||
extraFileExtensions: ['.svelte'],
|
||||
parser: ts.parser,
|
||||
svelteConfig
|
||||
@@ -72,6 +74,13 @@ export default defineConfig(
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
// E2E tests use console.log for diagnostic output — allow it there.
|
||||
files: ['e2e/**'],
|
||||
rules: {
|
||||
'no-console': 'off'
|
||||
}
|
||||
},
|
||||
{
|
||||
files: ['**/*.spec.ts', '**/*.test.ts'],
|
||||
rules: {
|
||||
|
||||
@@ -14,6 +14,9 @@
|
||||
"error_file_too_large": "Die Datei ist zu groß (max. 50 MB).",
|
||||
"error_user_not_found": "Der Benutzer wurde nicht gefunden.",
|
||||
"error_import_already_running": "Ein Import läuft bereits. Bitte warten Sie, bis dieser abgeschlossen ist.",
|
||||
"error_invalid_credentials": "E-Mail-Adresse oder Passwort ist falsch.",
|
||||
"error_session_expired": "Ihre Sitzung ist abgelaufen. Bitte melden Sie sich erneut an.",
|
||||
"error_session_expired_explainer": "Aus Sicherheitsgründen werden Sitzungen nach 8 Stunden Inaktivität automatisch beendet.",
|
||||
"error_unauthorized": "Sie sind nicht angemeldet.",
|
||||
"error_forbidden": "Sie haben keine Berechtigung für diese Aktion.",
|
||||
"error_validation_error": "Die Eingabe ist ungültig.",
|
||||
@@ -345,8 +348,11 @@
|
||||
"admin_system_import_btn_retry": "Erneut starten",
|
||||
"admin_system_import_status_idle": "Kein Import gestartet.",
|
||||
"admin_system_import_status_running": "Import läuft…",
|
||||
"admin_system_import_status_done": "Import abgeschlossen – {count} Dokumente verarbeitet.",
|
||||
"admin_system_import_status_failed": "Fehler: {message}",
|
||||
"admin_system_import_status_done": "Import abgeschlossen",
|
||||
"admin_system_import_status_done_label": "Dokumente verarbeitet",
|
||||
"admin_system_import_status_failed": "Import fehlgeschlagen",
|
||||
"admin_system_import_failed_no_spreadsheet": "Keine Tabellendatei gefunden.",
|
||||
"admin_system_import_failed_internal": "Interner Fehler beim Import.",
|
||||
"admin_system_thumbnails_heading": "Thumbnails erzeugen",
|
||||
"admin_system_thumbnails_description": "Erzeugt Vorschaubilder für Dokumente ohne Thumbnail (z. B. nach dem Massenimport).",
|
||||
"admin_system_thumbnails_btn_start": "Thumbnails erzeugen",
|
||||
@@ -470,7 +476,7 @@
|
||||
"dashboard_reader_stats_persons_short": "Pers.",
|
||||
"dashboard_reader_stats_stories_short": "Gesch.",
|
||||
"dashboard_reader_draft_meta": "Entwurf · zuletzt bearbeitet {relative}",
|
||||
"dashboard_resume_label": "Zuletzt geöffnet:",
|
||||
"dashboard_resume_label": "Weiter, wo du aufgehört hast",
|
||||
"dashboard_resume_fallback": "Unbekanntes Dokument",
|
||||
"doc_status_placeholder": "Platzhalter",
|
||||
"doc_status_uploaded": "Hochgeladen",
|
||||
@@ -703,6 +709,8 @@
|
||||
"error_invite_exhausted": "Dieser Einladungslink wurde bereits vollständig verwendet.",
|
||||
"error_invite_revoked": "Dieser Einladungslink wurde deaktiviert.",
|
||||
"error_invite_expired": "Dieser Einladungslink ist abgelaufen.",
|
||||
"error_group_has_active_invites": "Diese Gruppe kann nicht gelöscht werden, da sie in einer aktiven Einladung verwendet wird.",
|
||||
"error_group_not_found": "Die angegebene Gruppe existiert nicht.",
|
||||
"register_heading": "Konto erstellen",
|
||||
"register_subtext": "Du wurdest eingeladen, dem Familienarchiv beizutreten.",
|
||||
"register_label_first_name": "Vorname",
|
||||
@@ -762,22 +770,21 @@
|
||||
"admin_new_invite_prefill_last": "Nachname vorausfüllen (optional)",
|
||||
"admin_new_invite_prefill_email": "E-Mail vorausfüllen (optional)",
|
||||
"admin_new_invite_expires": "Ablaufdatum (optional)",
|
||||
"admin_new_invite_groups": "Gruppen (optional)",
|
||||
"admin_new_invite_no_groups": "Keine Gruppen vorhanden.",
|
||||
"admin_invite_groups_load_error": "Gruppen konnten nicht geladen werden. Die Einladung kann ohne Gruppenauswahl erstellt werden.",
|
||||
"admin_invite_created_title": "Einladung erstellt",
|
||||
"admin_invite_created_desc": "Teile diesen Link mit der einzuladenden Person:",
|
||||
"admin_invite_revoke_confirm": "Einladung wirklich widerrufen?",
|
||||
|
||||
"greeting_morning": "Guten Morgen, {name}.",
|
||||
"greeting_day": "Hallo, {name}.",
|
||||
"greeting_evening": "Guten Abend, {name}.",
|
||||
|
||||
"dashboard_resume_label": "Weiter, wo du aufgehört hast",
|
||||
"dashboard_blocks": "{count} Abschnitte",
|
||||
"dashboard_resume_cta": "Weitertranskribieren",
|
||||
"dashboard_resume_other": "oder anderen Brief wählen",
|
||||
"dashboard_empty_title": "Noch kein Dokument begonnen",
|
||||
"dashboard_empty_body": "Wähle ein Dokument aus dem Archiv, um mit der Transkription zu beginnen.",
|
||||
"dashboard_empty_cta": "Zum Archiv",
|
||||
|
||||
"dashboard_mission_caption": "Offene Aufgaben",
|
||||
"queue_segment": "Segmentieren",
|
||||
"queue_segment_blurb": "Seiten aufteilen",
|
||||
@@ -787,7 +794,6 @@
|
||||
"queue_review_blurb": "Texte kontrollieren",
|
||||
"queue_n_open": "{n} offen",
|
||||
"queue_show_all": "Alle anzeigen →",
|
||||
|
||||
"pulse_eyebrow": "Diese Woche",
|
||||
"pulse_headline": "Ihr habt {pages} Seiten bearbeitet.",
|
||||
"pulse_you": "Du selbst hast {pages} davon bearbeitet.",
|
||||
@@ -795,19 +801,15 @@
|
||||
"pulse_transcribed": "Textstellen markiert",
|
||||
"pulse_reviewed": "Textstellen transkribiert",
|
||||
"pulse_uploaded": "Dokumente hochgeladen",
|
||||
|
||||
"feed_caption": "Kommentare & Aktivität",
|
||||
"feed_show_all": "Alle anzeigen",
|
||||
"feed_for_you": "für dich",
|
||||
|
||||
"audit_action_text_saved": "hat Text gespeichert in",
|
||||
"audit_action_file_uploaded": "hat eine Datei hochgeladen:",
|
||||
"audit_action_annotation_created": "hat eine Markierung erstellt in",
|
||||
"audit_action_comment_added": "hat kommentiert:",
|
||||
"audit_action_mention_created": "hat dich erwähnt in",
|
||||
|
||||
"dropzone_release": "Loslassen zum Hochladen",
|
||||
|
||||
"chronik_page_title": "Aktivitäten",
|
||||
"chronik_for_you_caption": "Für dich",
|
||||
"chronik_for_you_count": "{count} neu",
|
||||
@@ -851,9 +853,7 @@
|
||||
"pagination_page_of": "Seite {page} von {total}",
|
||||
"pagination_nav_label": "Seitennavigation",
|
||||
"pagination_page_button": "Seite {page}",
|
||||
|
||||
"common_opens_new_tab": "(öffnet in neuem Tab)",
|
||||
|
||||
"transcribe_coach_title": "Erste Transkription?",
|
||||
"transcribe_coach_preamble": "Unser Kurrent-Erkenner lernt noch. Jede Transkription, die Sie zum Training freigeben, bringt ihm die Schrift bei — so funktioniert's:",
|
||||
"transcribe_coach_step_1_title": "Rahmen ziehen.",
|
||||
@@ -863,10 +863,8 @@
|
||||
"transcribe_coach_step_3_title": "Speichert automatisch.",
|
||||
"transcribe_coach_footer_kurrent": "Hilfe zu Kurrent ↗",
|
||||
"transcribe_coach_footer_richtlinien": "Transkriptions-Richtlinien ↗",
|
||||
|
||||
"transcription_mode_help_label": "Lese- und Bearbeitungsmodus",
|
||||
"transcription_mode_help_body": "Lesen zeigt die Transkription als fließenden Text. Bearbeiten öffnet die Textfelder für jede Passage.",
|
||||
|
||||
"richtlinien_title": "Transkriptions-Richtlinien",
|
||||
"richtlinien_intro": "Damit alle Briefe einheitlich transkribiert werden — egal wer tippt — hier unsere Regeln. Die Seite wächst mit: sobald wir eine neue Konvention beschließen, landet sie hier.",
|
||||
"richtlinien_wiki_text": "Kurrent- und Sütterlin-Alphabete sind bei Wikipedia gut erklärt. Hier stehen nur unsere eigenen Vereinbarungen für dieses Archiv.",
|
||||
@@ -940,12 +938,9 @@
|
||||
"bulk_edit_all_x_failed": "Filter konnte nicht abgerufen werden — bitte erneut versuchen.",
|
||||
"bulk_edit_topbar_title": "Massenbearbeitung",
|
||||
"bulk_edit_count_pill": "{count} werden bearbeitet",
|
||||
|
||||
"nav_stammbaum": "Stammbaum",
|
||||
"nav_geschichten": "Geschichten",
|
||||
|
||||
"error_geschichte_not_found": "Die Geschichte wurde nicht gefunden.",
|
||||
|
||||
"geschichten_index_title": "Geschichten",
|
||||
"geschichten_new_button": "Neue Geschichte",
|
||||
"geschichten_filter_all_pill": "Alle",
|
||||
@@ -965,7 +960,6 @@
|
||||
"geschichten_card_attach_action": "+ Geschichte anhängen",
|
||||
"geschichten_card_show_all_for_person": "Alle Geschichten zu {name}",
|
||||
"geschichten_card_show_all": "Alle anzeigen",
|
||||
|
||||
"geschichte_editor_title_placeholder": "Titel der Geschichte",
|
||||
"geschichte_editor_body_placeholder": "Schreibe hier deine Geschichte…",
|
||||
"geschichte_editor_status_draft": "ENTWURF",
|
||||
@@ -992,14 +986,11 @@
|
||||
"geschichte_editor_toolbar_h3": "Unterüberschrift",
|
||||
"geschichte_editor_toolbar_ul": "Aufzählung",
|
||||
"geschichte_editor_toolbar_ol": "Nummerierte Liste",
|
||||
|
||||
"geschichte_delete_confirm_title": "Geschichte löschen?",
|
||||
"geschichte_delete_confirm_body": "Diese Aktion kann nicht rückgängig gemacht werden. Die Geschichte wird dauerhaft gelöscht und aus allen verlinkten Personen- und Dokumentseiten entfernt.",
|
||||
|
||||
"error_relationship_not_found": "Die Beziehung wurde nicht gefunden.",
|
||||
"error_circular_relationship": "Diese Beziehung würde einen Kreis erzeugen.",
|
||||
"error_duplicate_relationship": "Diese Beziehung gibt es bereits.",
|
||||
|
||||
"relation_parent_of": "Elternteil von",
|
||||
"relation_child_of": "Kind von",
|
||||
"relation_spouse_of": "Ehegatte",
|
||||
@@ -1010,7 +1001,6 @@
|
||||
"relation_doctor": "Arzt",
|
||||
"relation_neighbor": "Nachbar",
|
||||
"relation_other": "Sonstige",
|
||||
|
||||
"relation_inferred_parent": "Elternteil",
|
||||
"relation_inferred_child": "Kind",
|
||||
"relation_inferred_spouse": "Ehegatte",
|
||||
@@ -1028,9 +1018,7 @@
|
||||
"relation_inferred_sibling_inlaw": "Schwager/Schwägerin",
|
||||
"relation_inferred_cousin_1": "Cousin/Cousine",
|
||||
"relation_inferred_distant": "Weitläufige Verwandtschaft",
|
||||
|
||||
"doc_details_field_relationship": "Verwandtschaft",
|
||||
|
||||
"stammbaum_empty_heading": "Noch keine Familienmitglieder",
|
||||
"stammbaum_empty_body": "Markiere Personen auf ihrer Bearbeitungsseite als Familienmitglied, damit sie hier erscheinen.",
|
||||
"stammbaum_empty_link": "→ Zur Personenliste",
|
||||
@@ -1042,7 +1030,6 @@
|
||||
"stammbaum_zoom_in": "Vergrößern",
|
||||
"stammbaum_zoom_out": "Verkleinern",
|
||||
"stammbaum_generations": "Generationen",
|
||||
|
||||
"relation_error_duplicate": "Diese Beziehung gibt es bereits.",
|
||||
"relation_error_circular": "Diese Beziehung würde einen Kreis erzeugen.",
|
||||
"relation_error_self": "Eine Person kann nicht mit sich selbst verbunden werden.",
|
||||
@@ -1065,14 +1052,15 @@
|
||||
"relation_form_field_from_year": "Von Jahr",
|
||||
"relation_form_field_to_year": "Bis Jahr",
|
||||
"relation_form_year_placeholder": "z.B. 1920",
|
||||
|
||||
"person_relationships_heading": "Beziehungen",
|
||||
"person_relationships_empty": "Noch keine Beziehungen bekannt.",
|
||||
|
||||
"timeline_aria_label": "Zeitachse Dokumentdichte",
|
||||
"timeline_clear_selection": "Auswahl zurücksetzen",
|
||||
"timeline_zoom_reset": "Zurück zur Übersicht",
|
||||
"timeline_bar_aria_singular": "{when}, 1 Dokument",
|
||||
"timeline_bar_aria_plural": "{when}, {count} Dokumente",
|
||||
"timeline_dragging_aria_live": "Zeitraum {from} bis {to} ausgewählt"
|
||||
"timeline_dragging_aria_live": "Zeitraum {from} bis {to} ausgewählt",
|
||||
"error_page_id_label": "Fehler-ID",
|
||||
"error_copy_id_label": "ID kopieren",
|
||||
"error_copied": "Kopiert!"
|
||||
}
|
||||
|
||||
@@ -14,6 +14,9 @@
|
||||
"error_file_too_large": "The file is too large (max. 50 MB).",
|
||||
"error_user_not_found": "User not found.",
|
||||
"error_import_already_running": "An import is already running. Please wait for it to finish.",
|
||||
"error_invalid_credentials": "Email address or password is incorrect.",
|
||||
"error_session_expired": "Your session has expired. Please sign in again.",
|
||||
"error_session_expired_explainer": "For security reasons, sessions are automatically ended after 8 hours of inactivity.",
|
||||
"error_unauthorized": "You are not logged in.",
|
||||
"error_forbidden": "You do not have permission for this action.",
|
||||
"error_validation_error": "The input is invalid.",
|
||||
@@ -345,8 +348,11 @@
|
||||
"admin_system_import_btn_retry": "Start again",
|
||||
"admin_system_import_status_idle": "No import started.",
|
||||
"admin_system_import_status_running": "Import running…",
|
||||
"admin_system_import_status_done": "Import complete – {count} documents processed.",
|
||||
"admin_system_import_status_failed": "Error: {message}",
|
||||
"admin_system_import_status_done": "Import complete",
|
||||
"admin_system_import_status_done_label": "Documents processed",
|
||||
"admin_system_import_status_failed": "Import failed",
|
||||
"admin_system_import_failed_no_spreadsheet": "No spreadsheet file found.",
|
||||
"admin_system_import_failed_internal": "Import failed due to an internal error.",
|
||||
"admin_system_thumbnails_heading": "Generate thumbnails",
|
||||
"admin_system_thumbnails_description": "Generates preview images for documents without a thumbnail (e.g. after the mass import).",
|
||||
"admin_system_thumbnails_btn_start": "Generate thumbnails",
|
||||
@@ -470,7 +476,7 @@
|
||||
"dashboard_reader_stats_persons_short": "Pers.",
|
||||
"dashboard_reader_stats_stories_short": "Stor.",
|
||||
"dashboard_reader_draft_meta": "Draft · last edited {relative}",
|
||||
"dashboard_resume_label": "Last opened:",
|
||||
"dashboard_resume_label": "Continue where you left off",
|
||||
"dashboard_resume_fallback": "Unknown document",
|
||||
"doc_status_placeholder": "Placeholder",
|
||||
"doc_status_uploaded": "Uploaded",
|
||||
@@ -703,6 +709,8 @@
|
||||
"error_invite_exhausted": "This invite link has already been fully used.",
|
||||
"error_invite_revoked": "This invite link has been deactivated.",
|
||||
"error_invite_expired": "This invite link has expired.",
|
||||
"error_group_has_active_invites": "This group cannot be deleted because it is referenced by one or more active invite links.",
|
||||
"error_group_not_found": "The specified group does not exist.",
|
||||
"register_heading": "Create account",
|
||||
"register_subtext": "You've been invited to join Familienarchiv.",
|
||||
"register_label_first_name": "First name",
|
||||
@@ -762,22 +770,21 @@
|
||||
"admin_new_invite_prefill_last": "Pre-fill last name (optional)",
|
||||
"admin_new_invite_prefill_email": "Pre-fill email (optional)",
|
||||
"admin_new_invite_expires": "Expiry date (optional)",
|
||||
"admin_new_invite_groups": "Groups (optional)",
|
||||
"admin_new_invite_no_groups": "No groups exist.",
|
||||
"admin_invite_groups_load_error": "Groups could not be loaded. The invite can still be created without group assignment.",
|
||||
"admin_invite_created_title": "Invite created",
|
||||
"admin_invite_created_desc": "Share this link with the person you are inviting:",
|
||||
"admin_invite_revoke_confirm": "Really revoke this invite?",
|
||||
|
||||
"greeting_morning": "Good morning, {name}.",
|
||||
"greeting_day": "Hello, {name}.",
|
||||
"greeting_evening": "Good evening, {name}.",
|
||||
|
||||
"dashboard_resume_label": "Continue where you left off",
|
||||
"dashboard_blocks": "{count} sections",
|
||||
"dashboard_resume_cta": "Continue transcribing",
|
||||
"dashboard_resume_other": "or choose another document",
|
||||
"dashboard_empty_title": "No document started yet",
|
||||
"dashboard_empty_body": "Choose a document from the archive to start transcribing.",
|
||||
"dashboard_empty_cta": "To the archive",
|
||||
|
||||
"dashboard_mission_caption": "Open tasks",
|
||||
"queue_segment": "Segment",
|
||||
"queue_segment_blurb": "Split pages",
|
||||
@@ -787,7 +794,6 @@
|
||||
"queue_review_blurb": "Check texts",
|
||||
"queue_n_open": "{n} open",
|
||||
"queue_show_all": "Show all →",
|
||||
|
||||
"pulse_eyebrow": "This week",
|
||||
"pulse_headline": "You have worked on {pages} pages.",
|
||||
"pulse_you": "You personally worked on {pages} of them.",
|
||||
@@ -795,19 +801,15 @@
|
||||
"pulse_transcribed": "Passages annotated",
|
||||
"pulse_reviewed": "Passages transcribed",
|
||||
"pulse_uploaded": "Documents uploaded",
|
||||
|
||||
"feed_caption": "Comments & activity",
|
||||
"feed_show_all": "Show all",
|
||||
"feed_for_you": "for you",
|
||||
|
||||
"audit_action_text_saved": "saved text in",
|
||||
"audit_action_file_uploaded": "uploaded a file:",
|
||||
"audit_action_annotation_created": "created an annotation in",
|
||||
"audit_action_comment_added": "commented:",
|
||||
"audit_action_mention_created": "mentioned you in",
|
||||
|
||||
"dropzone_release": "Release to upload",
|
||||
|
||||
"chronik_page_title": "Activity",
|
||||
"chronik_for_you_caption": "For you",
|
||||
"chronik_for_you_count": "{count} new",
|
||||
@@ -851,9 +853,7 @@
|
||||
"pagination_page_of": "Page {page} of {total}",
|
||||
"pagination_nav_label": "Pagination",
|
||||
"pagination_page_button": "Page {page}",
|
||||
|
||||
"common_opens_new_tab": "(opens in new tab)",
|
||||
|
||||
"transcribe_coach_title": "First transcription?",
|
||||
"transcribe_coach_preamble": "Our Kurrent recogniser is still learning. Every transcription you release for training teaches it the handwriting — here's how it works:",
|
||||
"transcribe_coach_step_1_title": "Draw a frame.",
|
||||
@@ -863,10 +863,8 @@
|
||||
"transcribe_coach_step_3_title": "Saves automatically.",
|
||||
"transcribe_coach_footer_kurrent": "Kurrent help ↗",
|
||||
"transcribe_coach_footer_richtlinien": "Transcription guidelines ↗",
|
||||
|
||||
"transcription_mode_help_label": "Read and edit mode",
|
||||
"transcription_mode_help_body": "Read shows the transcription as flowing text. Edit opens the text fields for each passage.",
|
||||
|
||||
"richtlinien_title": "Transcription Guidelines",
|
||||
"richtlinien_intro": "So every letter is transcribed consistently — no matter who types — here are our rules. The page grows with us: as soon as we agree a new convention, it lands here.",
|
||||
"richtlinien_wiki_text": "The Kurrent and Sütterlin alphabets are well explained on Wikipedia. Here you'll only find our own conventions for this archive.",
|
||||
@@ -940,12 +938,9 @@
|
||||
"bulk_edit_all_x_failed": "Could not load filter results — please retry.",
|
||||
"bulk_edit_topbar_title": "Bulk edit",
|
||||
"bulk_edit_count_pill": "{count} will be edited",
|
||||
|
||||
"nav_stammbaum": "Family tree",
|
||||
"nav_geschichten": "Stories",
|
||||
|
||||
"error_geschichte_not_found": "The story was not found.",
|
||||
|
||||
"geschichten_index_title": "Stories",
|
||||
"geschichten_new_button": "New story",
|
||||
"geschichten_filter_all_pill": "All",
|
||||
@@ -965,7 +960,6 @@
|
||||
"geschichten_card_attach_action": "+ Attach a story",
|
||||
"geschichten_card_show_all_for_person": "All stories about {name}",
|
||||
"geschichten_card_show_all": "Show all",
|
||||
|
||||
"geschichte_editor_title_placeholder": "Story title",
|
||||
"geschichte_editor_body_placeholder": "Write your story here…",
|
||||
"geschichte_editor_status_draft": "DRAFT",
|
||||
@@ -992,14 +986,11 @@
|
||||
"geschichte_editor_toolbar_h3": "Subheading",
|
||||
"geschichte_editor_toolbar_ul": "Bulleted list",
|
||||
"geschichte_editor_toolbar_ol": "Numbered list",
|
||||
|
||||
"geschichte_delete_confirm_title": "Delete story?",
|
||||
"geschichte_delete_confirm_body": "This action cannot be undone. The story will be permanently deleted and removed from all linked person and document pages.",
|
||||
|
||||
"error_relationship_not_found": "Relationship not found.",
|
||||
"error_circular_relationship": "This relationship would form a cycle.",
|
||||
"error_duplicate_relationship": "This relationship already exists.",
|
||||
|
||||
"relation_parent_of": "Parent of",
|
||||
"relation_child_of": "Child of",
|
||||
"relation_spouse_of": "Spouse",
|
||||
@@ -1010,7 +1001,6 @@
|
||||
"relation_doctor": "Doctor",
|
||||
"relation_neighbor": "Neighbour",
|
||||
"relation_other": "Other",
|
||||
|
||||
"relation_inferred_parent": "Parent",
|
||||
"relation_inferred_child": "Child",
|
||||
"relation_inferred_spouse": "Spouse",
|
||||
@@ -1028,9 +1018,7 @@
|
||||
"relation_inferred_sibling_inlaw": "Sibling-in-law",
|
||||
"relation_inferred_cousin_1": "Cousin",
|
||||
"relation_inferred_distant": "Distant relative",
|
||||
|
||||
"doc_details_field_relationship": "Relationship",
|
||||
|
||||
"stammbaum_empty_heading": "No family members yet",
|
||||
"stammbaum_empty_body": "Mark a person as a family member on their edit page so they appear here.",
|
||||
"stammbaum_empty_link": "→ Go to person list",
|
||||
@@ -1042,7 +1030,6 @@
|
||||
"stammbaum_zoom_in": "Zoom in",
|
||||
"stammbaum_zoom_out": "Zoom out",
|
||||
"stammbaum_generations": "Generations",
|
||||
|
||||
"relation_error_duplicate": "This relationship already exists.",
|
||||
"relation_error_circular": "This relationship would form a cycle.",
|
||||
"relation_error_self": "A person cannot be related to themselves.",
|
||||
@@ -1065,14 +1052,15 @@
|
||||
"relation_form_field_from_year": "From year",
|
||||
"relation_form_field_to_year": "To year",
|
||||
"relation_form_year_placeholder": "e.g. 1920",
|
||||
|
||||
"person_relationships_heading": "Relationships",
|
||||
"person_relationships_empty": "No relationships known yet.",
|
||||
|
||||
"timeline_aria_label": "Document density timeline",
|
||||
"timeline_clear_selection": "Clear selection",
|
||||
"timeline_zoom_reset": "Reset zoom",
|
||||
"timeline_bar_aria_singular": "{when}, 1 document",
|
||||
"timeline_bar_aria_plural": "{when}, {count} documents",
|
||||
"timeline_dragging_aria_live": "Range {from} to {to} selected"
|
||||
"timeline_dragging_aria_live": "Range {from} to {to} selected",
|
||||
"error_page_id_label": "Error ID",
|
||||
"error_copy_id_label": "Copy ID",
|
||||
"error_copied": "Copied!"
|
||||
}
|
||||
|
||||
@@ -14,6 +14,9 @@
|
||||
"error_file_too_large": "El archivo es demasiado grande (máx. 50 MB).",
|
||||
"error_user_not_found": "Usuario no encontrado.",
|
||||
"error_import_already_running": "Ya hay una importación en curso. Por favor, espere a que finalice.",
|
||||
"error_invalid_credentials": "El correo electrónico o la contraseña son incorrectos.",
|
||||
"error_session_expired": "Su sesión ha expirado. Por favor, inicie sesión de nuevo.",
|
||||
"error_session_expired_explainer": "Por razones de seguridad, las sesiones se terminan automáticamente tras 8 horas de inactividad.",
|
||||
"error_unauthorized": "No ha iniciado sesión.",
|
||||
"error_forbidden": "No tiene permiso para realizar esta acción.",
|
||||
"error_validation_error": "La entrada no es válida.",
|
||||
@@ -345,8 +348,11 @@
|
||||
"admin_system_import_btn_retry": "Iniciar de nuevo",
|
||||
"admin_system_import_status_idle": "No hay importación iniciada.",
|
||||
"admin_system_import_status_running": "Importación en curso…",
|
||||
"admin_system_import_status_done": "Importación completada – {count} documentos procesados.",
|
||||
"admin_system_import_status_failed": "Error: {message}",
|
||||
"admin_system_import_status_done": "Importación completada",
|
||||
"admin_system_import_status_done_label": "Documentos procesados",
|
||||
"admin_system_import_status_failed": "Importación fallida",
|
||||
"admin_system_import_failed_no_spreadsheet": "No se encontró ninguna hoja de cálculo.",
|
||||
"admin_system_import_failed_internal": "Error interno durante la importación.",
|
||||
"admin_system_thumbnails_heading": "Generar miniaturas",
|
||||
"admin_system_thumbnails_description": "Genera imágenes de vista previa para documentos sin miniatura (p. ej. tras la importación masiva).",
|
||||
"admin_system_thumbnails_btn_start": "Generar miniaturas",
|
||||
@@ -470,7 +476,7 @@
|
||||
"dashboard_reader_stats_persons_short": "Pers.",
|
||||
"dashboard_reader_stats_stories_short": "Hist.",
|
||||
"dashboard_reader_draft_meta": "Borrador · editado hace {relative}",
|
||||
"dashboard_resume_label": "Último abierto:",
|
||||
"dashboard_resume_label": "Continuar donde lo dejaste",
|
||||
"dashboard_resume_fallback": "Documento desconocido",
|
||||
"doc_status_placeholder": "Marcador",
|
||||
"doc_status_uploaded": "Cargado",
|
||||
@@ -703,6 +709,8 @@
|
||||
"error_invite_exhausted": "Este enlace de invitación ya ha sido completamente utilizado.",
|
||||
"error_invite_revoked": "Este enlace de invitación ha sido desactivado.",
|
||||
"error_invite_expired": "Este enlace de invitación ha expirado.",
|
||||
"error_group_has_active_invites": "Este grupo no puede eliminarse porque está referenciado por uno o más enlaces de invitación activos.",
|
||||
"error_group_not_found": "El grupo especificado no existe.",
|
||||
"register_heading": "Crear cuenta",
|
||||
"register_subtext": "Has sido invitado a unirte al Familienarchiv.",
|
||||
"register_label_first_name": "Nombre",
|
||||
@@ -762,22 +770,21 @@
|
||||
"admin_new_invite_prefill_last": "Prellenar apellido (opcional)",
|
||||
"admin_new_invite_prefill_email": "Prellenar correo (opcional)",
|
||||
"admin_new_invite_expires": "Fecha de vencimiento (opcional)",
|
||||
"admin_new_invite_groups": "Grupos (opcional)",
|
||||
"admin_new_invite_no_groups": "No hay grupos disponibles.",
|
||||
"admin_invite_groups_load_error": "No se pudieron cargar los grupos. La invitación puede crearse sin asignar grupos.",
|
||||
"admin_invite_created_title": "Invitación creada",
|
||||
"admin_invite_created_desc": "Comparte este enlace con la persona invitada:",
|
||||
"admin_invite_revoke_confirm": "¿Realmente revocar esta invitación?",
|
||||
|
||||
"greeting_morning": "Buenos días, {name}.",
|
||||
"greeting_day": "Hola, {name}.",
|
||||
"greeting_evening": "Buenas noches, {name}.",
|
||||
|
||||
"dashboard_resume_label": "Continuar donde lo dejaste",
|
||||
"dashboard_blocks": "{count} secciones",
|
||||
"dashboard_resume_cta": "Continuar transcripción",
|
||||
"dashboard_resume_other": "o elige otro documento",
|
||||
"dashboard_empty_title": "Aún no has comenzado ningún documento",
|
||||
"dashboard_empty_body": "Elige un documento del archivo para empezar a transcribir.",
|
||||
"dashboard_empty_cta": "Al archivo",
|
||||
|
||||
"dashboard_mission_caption": "Tareas pendientes",
|
||||
"queue_segment": "Segmentar",
|
||||
"queue_segment_blurb": "Dividir páginas",
|
||||
@@ -787,7 +794,6 @@
|
||||
"queue_review_blurb": "Controlar textos",
|
||||
"queue_n_open": "{n} pendiente",
|
||||
"queue_show_all": "Ver todo →",
|
||||
|
||||
"pulse_eyebrow": "Esta semana",
|
||||
"pulse_headline": "Habéis trabajado {pages} páginas.",
|
||||
"pulse_you": "Tú mismo has trabajado {pages} de ellas.",
|
||||
@@ -795,19 +801,15 @@
|
||||
"pulse_transcribed": "Fragmentos anotados",
|
||||
"pulse_reviewed": "Fragmentos transcritos",
|
||||
"pulse_uploaded": "Documentos subidos",
|
||||
|
||||
"feed_caption": "Comentarios y actividad",
|
||||
"feed_show_all": "Ver todo",
|
||||
"feed_for_you": "para ti",
|
||||
|
||||
"audit_action_text_saved": "guardó texto en",
|
||||
"audit_action_file_uploaded": "subió un archivo:",
|
||||
"audit_action_annotation_created": "creó una anotación en",
|
||||
"audit_action_comment_added": "comentó:",
|
||||
"audit_action_mention_created": "te mencionó en",
|
||||
|
||||
"dropzone_release": "Suelta para subir",
|
||||
|
||||
"chronik_page_title": "Actividades",
|
||||
"chronik_for_you_caption": "Para ti",
|
||||
"chronik_for_you_count": "{count} nuevas",
|
||||
@@ -851,9 +853,7 @@
|
||||
"pagination_page_of": "Página {page} de {total}",
|
||||
"pagination_nav_label": "Paginación",
|
||||
"pagination_page_button": "Página {page}",
|
||||
|
||||
"common_opens_new_tab": "(abre en pestaña nueva)",
|
||||
|
||||
"transcribe_coach_title": "¿Primera transcripción?",
|
||||
"transcribe_coach_preamble": "Nuestro reconocedor de Kurrent aún está aprendiendo. Cada transcripción que libera para el entrenamiento le enseña la escritura — así funciona:",
|
||||
"transcribe_coach_step_1_title": "Dibujar un marco.",
|
||||
@@ -863,10 +863,8 @@
|
||||
"transcribe_coach_step_3_title": "Se guarda automáticamente.",
|
||||
"transcribe_coach_footer_kurrent": "Ayuda sobre Kurrent ↗",
|
||||
"transcribe_coach_footer_richtlinien": "Normas de transcripción ↗",
|
||||
|
||||
"transcription_mode_help_label": "Modo lectura y edición",
|
||||
"transcription_mode_help_body": "Lectura muestra la transcripción como texto continuo. Edición abre los campos de texto para cada pasaje.",
|
||||
|
||||
"richtlinien_title": "Normas de transcripción",
|
||||
"richtlinien_intro": "Para que todas las cartas se transcriban de forma uniforme — sin importar quién transcriba — aquí están nuestras reglas. La página crece con nosotros.",
|
||||
"richtlinien_wiki_text": "Los alfabetos Kurrent y Sütterlin están bien explicados en Wikipedia. Aquí solo se recogen nuestros propios acuerdos para este archivo.",
|
||||
@@ -940,12 +938,9 @@
|
||||
"bulk_edit_all_x_failed": "No se pudieron cargar los resultados del filtro; vuelve a intentarlo.",
|
||||
"bulk_edit_topbar_title": "Edición masiva",
|
||||
"bulk_edit_count_pill": "Se editarán {count}",
|
||||
|
||||
"nav_stammbaum": "Árbol genealógico",
|
||||
"nav_geschichten": "Historias",
|
||||
|
||||
"error_geschichte_not_found": "No se encontró la historia.",
|
||||
|
||||
"geschichten_index_title": "Historias",
|
||||
"geschichten_new_button": "Nueva historia",
|
||||
"geschichten_filter_all_pill": "Todas",
|
||||
@@ -965,7 +960,6 @@
|
||||
"geschichten_card_attach_action": "+ Adjuntar historia",
|
||||
"geschichten_card_show_all_for_person": "Todas las historias sobre {name}",
|
||||
"geschichten_card_show_all": "Mostrar todas",
|
||||
|
||||
"geschichte_editor_title_placeholder": "Título de la historia",
|
||||
"geschichte_editor_body_placeholder": "Escribe tu historia aquí…",
|
||||
"geschichte_editor_status_draft": "BORRADOR",
|
||||
@@ -992,14 +986,11 @@
|
||||
"geschichte_editor_toolbar_h3": "Subencabezado",
|
||||
"geschichte_editor_toolbar_ul": "Lista con viñetas",
|
||||
"geschichte_editor_toolbar_ol": "Lista numerada",
|
||||
|
||||
"geschichte_delete_confirm_title": "¿Eliminar historia?",
|
||||
"geschichte_delete_confirm_body": "Esta acción no se puede deshacer. La historia se eliminará permanentemente y se quitará de todas las páginas de personas y documentos vinculados.",
|
||||
|
||||
"error_relationship_not_found": "La relación no fue encontrada.",
|
||||
"error_circular_relationship": "Esta relación crearía un ciclo.",
|
||||
"error_duplicate_relationship": "Esta relación ya existe.",
|
||||
|
||||
"relation_parent_of": "Progenitor de",
|
||||
"relation_child_of": "Hijo/a de",
|
||||
"relation_spouse_of": "Cónyuge",
|
||||
@@ -1010,7 +1001,6 @@
|
||||
"relation_doctor": "Médico",
|
||||
"relation_neighbor": "Vecino/a",
|
||||
"relation_other": "Otro",
|
||||
|
||||
"relation_inferred_parent": "Progenitor",
|
||||
"relation_inferred_child": "Hijo/a",
|
||||
"relation_inferred_spouse": "Cónyuge",
|
||||
@@ -1028,9 +1018,7 @@
|
||||
"relation_inferred_sibling_inlaw": "Cuñado/a",
|
||||
"relation_inferred_cousin_1": "Primo/a",
|
||||
"relation_inferred_distant": "Pariente lejano",
|
||||
|
||||
"doc_details_field_relationship": "Parentesco",
|
||||
|
||||
"stammbaum_empty_heading": "Aún no hay miembros de la familia",
|
||||
"stammbaum_empty_body": "Marca a una persona como miembro de la familia en su página de edición para que aparezca aquí.",
|
||||
"stammbaum_empty_link": "→ Ir a la lista de personas",
|
||||
@@ -1042,7 +1030,6 @@
|
||||
"stammbaum_zoom_in": "Acercar",
|
||||
"stammbaum_zoom_out": "Alejar",
|
||||
"stammbaum_generations": "Generaciones",
|
||||
|
||||
"relation_error_duplicate": "Esta relación ya existe.",
|
||||
"relation_error_circular": "Esta relación crearía un ciclo.",
|
||||
"relation_error_self": "Una persona no puede estar relacionada consigo misma.",
|
||||
@@ -1065,14 +1052,15 @@
|
||||
"relation_form_field_from_year": "Desde año",
|
||||
"relation_form_field_to_year": "Hasta año",
|
||||
"relation_form_year_placeholder": "ej. 1920",
|
||||
|
||||
"person_relationships_heading": "Relaciones",
|
||||
"person_relationships_empty": "Aún no se conocen relaciones.",
|
||||
|
||||
"timeline_aria_label": "Cronología de densidad de documentos",
|
||||
"timeline_clear_selection": "Borrar selección",
|
||||
"timeline_zoom_reset": "Restablecer zoom",
|
||||
"timeline_bar_aria_singular": "{when}, 1 documento",
|
||||
"timeline_bar_aria_plural": "{when}, {count} documentos",
|
||||
"timeline_dragging_aria_live": "Rango {from} a {to} seleccionado"
|
||||
"timeline_dragging_aria_live": "Rango {from} a {to} seleccionado",
|
||||
"error_page_id_label": "ID de error",
|
||||
"error_copy_id_label": "Copiar ID",
|
||||
"error_copied": "¡Copiado!"
|
||||
}
|
||||
|
||||
1601
frontend/package-lock.json
generated
1601
frontend/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -16,13 +16,14 @@
|
||||
"lint:boundary-demo": "eslint src/lib/tag/__fixtures__/",
|
||||
"test:unit": "vitest",
|
||||
"test": "npm run test:unit -- --run",
|
||||
"test:coverage": "vitest run --coverage --project=server && vitest run -c vitest.client-coverage.config.ts --coverage",
|
||||
"test:coverage": "vitest run --coverage --project=server; vitest run -c vitest.client-coverage.config.ts --coverage",
|
||||
"test:e2e": "playwright test",
|
||||
"test:e2e:headed": "playwright test --headed",
|
||||
"test:e2e:ui": "playwright test --ui",
|
||||
"generate:api": "openapi-typescript http://localhost:8080/v3/api-docs -o ./src/lib/generated/api.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@sentry/sveltekit": "^10.53.1",
|
||||
"@tiptap/core": "3.22.5",
|
||||
"@tiptap/extension-mention": "3.22.5",
|
||||
"@tiptap/starter-kit": "3.22.5",
|
||||
|
||||
5
frontend/src/app.d.ts
vendored
5
frontend/src/app.d.ts
vendored
@@ -26,6 +26,11 @@ declare global {
|
||||
interface PageData {
|
||||
user?: User; // Available in $page.data.user
|
||||
}
|
||||
|
||||
interface Error {
|
||||
message: string;
|
||||
errorId?: string;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
47
frontend/src/hooks.client.test.ts
Normal file
47
frontend/src/hooks.client.test.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
|
||||
vi.mock('@sentry/sveltekit', () => ({
|
||||
init: vi.fn(),
|
||||
handleErrorWithSentry: (fn: (args: unknown) => unknown) => fn,
|
||||
lastEventId: vi.fn(() => 'sentry-event-id-abc123')
|
||||
}));
|
||||
|
||||
describe('hooks.client handleError', () => {
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
it('returns Sentry lastEventId as errorId', async () => {
|
||||
const Sentry = await import('@sentry/sveltekit');
|
||||
vi.mocked(Sentry.lastEventId).mockReturnValue('sentry-event-id-abc123');
|
||||
|
||||
const { handleError } = await import('./hooks.client');
|
||||
const result = (handleError as (args: unknown) => { message: string; errorId: string })({
|
||||
error: new Error('boom'),
|
||||
event: {},
|
||||
status: 500,
|
||||
message: 'Internal Error'
|
||||
});
|
||||
|
||||
expect(result.errorId).toBe('sentry-event-id-abc123');
|
||||
expect(result.message).toBe('An unexpected error occurred');
|
||||
});
|
||||
|
||||
it('falls back to crypto.randomUUID when lastEventId returns undefined', async () => {
|
||||
const Sentry = await import('@sentry/sveltekit');
|
||||
vi.mocked(Sentry.lastEventId).mockReturnValue(undefined);
|
||||
|
||||
const { handleError } = await import('./hooks.client');
|
||||
const result = (handleError as (args: unknown) => { message: string; errorId: string })({
|
||||
error: new Error('boom'),
|
||||
event: {},
|
||||
status: 500,
|
||||
message: 'Internal Error'
|
||||
});
|
||||
|
||||
expect(result.errorId).toMatch(
|
||||
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/
|
||||
);
|
||||
expect(result.message).toBe('An unexpected error occurred');
|
||||
});
|
||||
});
|
||||
16
frontend/src/hooks.client.ts
Normal file
16
frontend/src/hooks.client.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import * as Sentry from '@sentry/sveltekit';
|
||||
|
||||
// VITE_SENTRY_DSN is a write-only ingest key — it can POST events to GlitchTip
|
||||
// but cannot read them. Safe to include in the client bundle per Sentry security model.
|
||||
Sentry.init({
|
||||
dsn: import.meta.env.VITE_SENTRY_DSN,
|
||||
environment: import.meta.env.MODE,
|
||||
tracesSampleRate: 0.1,
|
||||
sendDefaultPii: false,
|
||||
enabled: !!import.meta.env.VITE_SENTRY_DSN
|
||||
});
|
||||
|
||||
export const handleError = Sentry.handleErrorWithSentry(() => {
|
||||
const errorId = Sentry.lastEventId() ?? crypto.randomUUID();
|
||||
return { message: 'An unexpected error occurred', errorId };
|
||||
});
|
||||
58
frontend/src/hooks.server.test.ts
Normal file
58
frontend/src/hooks.server.test.ts
Normal file
@@ -0,0 +1,58 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
|
||||
vi.mock('@sentry/sveltekit', () => ({
|
||||
init: vi.fn(),
|
||||
handleErrorWithSentry: (fn: (args: unknown) => unknown) => fn,
|
||||
lastEventId: vi.fn(() => 'sentry-event-id-abc123')
|
||||
}));
|
||||
|
||||
vi.mock('@sveltejs/kit', () => ({ redirect: vi.fn() }));
|
||||
vi.mock('@sveltejs/kit/hooks', () => ({ sequence: vi.fn((...fns: unknown[]) => fns[0]) }));
|
||||
vi.mock('$lib/paraglide/server', () => ({ paraglideMiddleware: vi.fn() }));
|
||||
vi.mock('$lib/paraglide/runtime', () => ({ cookieName: 'locale', cookieMaxAge: 86400 }));
|
||||
vi.mock('$lib/shared/server/locale', () => ({ detectLocale: vi.fn(() => 'de') }));
|
||||
|
||||
const makeEvent = () => ({
|
||||
url: { pathname: '/documents/123' },
|
||||
locals: {}
|
||||
});
|
||||
|
||||
describe('hooks.server handleError', () => {
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
it('returns Sentry lastEventId as errorId', async () => {
|
||||
const Sentry = await import('@sentry/sveltekit');
|
||||
vi.mocked(Sentry.lastEventId).mockReturnValue('sentry-event-id-abc123');
|
||||
|
||||
const { handleError } = await import('./hooks.server');
|
||||
const result = (handleError as (args: unknown) => { message: string; errorId: string })({
|
||||
error: new Error('boom'),
|
||||
event: makeEvent(),
|
||||
status: 500,
|
||||
message: 'Internal Error'
|
||||
});
|
||||
|
||||
expect(result.errorId).toBe('sentry-event-id-abc123');
|
||||
expect(result.message).toBe('An unexpected error occurred');
|
||||
});
|
||||
|
||||
it('falls back to crypto.randomUUID when lastEventId returns undefined', async () => {
|
||||
const Sentry = await import('@sentry/sveltekit');
|
||||
vi.mocked(Sentry.lastEventId).mockReturnValue(undefined);
|
||||
|
||||
const { handleError } = await import('./hooks.server');
|
||||
const result = (handleError as (args: unknown) => { message: string; errorId: string })({
|
||||
error: new Error('boom'),
|
||||
event: makeEvent(),
|
||||
status: 500,
|
||||
message: 'Internal Error'
|
||||
});
|
||||
|
||||
expect(result.errorId).toMatch(
|
||||
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/
|
||||
);
|
||||
expect(result.message).toBe('An unexpected error occurred');
|
||||
});
|
||||
});
|
||||
@@ -1,3 +1,4 @@
|
||||
import * as Sentry from '@sentry/sveltekit';
|
||||
import { redirect, type Handle, type HandleFetch } from '@sveltejs/kit';
|
||||
import { paraglideMiddleware } from '$lib/paraglide/server';
|
||||
import { sequence } from '@sveltejs/kit/hooks';
|
||||
@@ -5,6 +6,16 @@ import { env } from 'process';
|
||||
import { cookieName, cookieMaxAge } from '$lib/paraglide/runtime';
|
||||
import { detectLocale } from '$lib/shared/server/locale';
|
||||
|
||||
// VITE_SENTRY_DSN is a write-only ingest key — it can POST events to GlitchTip
|
||||
// but cannot read them. Safe to include in the client bundle per Sentry security model.
|
||||
Sentry.init({
|
||||
dsn: import.meta.env.VITE_SENTRY_DSN,
|
||||
environment: import.meta.env.MODE,
|
||||
tracesSampleRate: 0.1,
|
||||
sendDefaultPii: false,
|
||||
enabled: !!import.meta.env.VITE_SENTRY_DSN
|
||||
});
|
||||
|
||||
const PUBLIC_PATHS = [
|
||||
'/login',
|
||||
'/logout',
|
||||
@@ -47,21 +58,40 @@ const handleParaglide: Handle = ({ event, resolve }) =>
|
||||
});
|
||||
|
||||
const userGroup: Handle = async ({ event, resolve }) => {
|
||||
const auth = event.cookies.get('auth_token');
|
||||
// One-off cleanup of the legacy Basic-credentials cookie from before the Spring Session migration (#523).
|
||||
if (event.cookies.get('auth_token')) {
|
||||
event.cookies.delete('auth_token', { path: '/' });
|
||||
}
|
||||
|
||||
if (auth) {
|
||||
try {
|
||||
const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
|
||||
const response = await fetch(`${apiUrl}/api/users/me`, {
|
||||
headers: { Authorization: auth }
|
||||
});
|
||||
if (response.ok) {
|
||||
const user = await response.json();
|
||||
event.locals.user = user;
|
||||
const sessionId = event.cookies.get('fa_session');
|
||||
if (!sessionId) {
|
||||
return resolve(event);
|
||||
}
|
||||
|
||||
try {
|
||||
const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
|
||||
const response = await fetch(`${apiUrl}/api/users/me`, {
|
||||
headers: { Cookie: `fa_session=${sessionId}` }
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
event.locals.user = await response.json();
|
||||
} else if (response.status === 401) {
|
||||
// Backend rejected the session (expired or invalidated). Drop the stale
|
||||
// cookie and surface the reason on the login page. PUBLIC_PATHS check
|
||||
// avoids a redirect loop if the user is already on /login.
|
||||
event.cookies.delete('fa_session', { path: '/' });
|
||||
const isPublic = PUBLIC_PATHS.some((p) => event.url.pathname.startsWith(p));
|
||||
if (!isPublic) {
|
||||
throw redirect(302, '/login?reason=expired');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error fetching user in hook:', error);
|
||||
}
|
||||
} catch (error) {
|
||||
// Don't swallow SvelteKit redirects — they're thrown as objects with a `status` field.
|
||||
if (error instanceof Object && 'status' in error && 'location' in error) {
|
||||
throw error;
|
||||
}
|
||||
console.error('Error fetching user in hook:', error);
|
||||
}
|
||||
|
||||
return resolve(event);
|
||||
@@ -72,14 +102,11 @@ export const handleFetch: HandleFetch = async ({ event, request, fetch }) => {
|
||||
const isApi = request.url.startsWith(apiUrl) || request.url.includes('/api/');
|
||||
|
||||
if (isApi) {
|
||||
// If the request already carries an explicit Authorization header (e.g. the
|
||||
// login action sends Basic auth), pass it through unchanged.
|
||||
if (request.headers.has('Authorization')) {
|
||||
return fetch(request);
|
||||
}
|
||||
|
||||
// Password reset endpoints are public — no auth header needed.
|
||||
// Auth endpoints that establish/check their own credentials manage cookies themselves;
|
||||
// don't double-inject a stale fa_session.
|
||||
const PUBLIC_API_PATHS = [
|
||||
'/api/auth/login',
|
||||
'/api/auth/logout',
|
||||
'/api/auth/forgot-password',
|
||||
'/api/auth/reset-password',
|
||||
'/api/auth/invite/',
|
||||
@@ -89,27 +116,28 @@ export const handleFetch: HandleFetch = async ({ event, request, fetch }) => {
|
||||
return fetch(request);
|
||||
}
|
||||
|
||||
const token = event.cookies.get('auth_token');
|
||||
|
||||
if (!token) {
|
||||
const sessionId = event.cookies.get('fa_session');
|
||||
if (!sessionId) {
|
||||
return new Response('Unauthorized', { status: 401 });
|
||||
}
|
||||
|
||||
// Clone the request first to preserve the body
|
||||
const clonedRequest = request.clone();
|
||||
|
||||
// Create new request with Authorization header and preserved body
|
||||
const modifiedRequest = new Request(clonedRequest, {
|
||||
// Clone first so the body stream is preserved on the new Request.
|
||||
const cloned = request.clone();
|
||||
const modified = new Request(cloned, {
|
||||
headers: {
|
||||
...Object.fromEntries(clonedRequest.headers),
|
||||
Authorization: token
|
||||
...Object.fromEntries(cloned.headers),
|
||||
Cookie: `fa_session=${sessionId}`
|
||||
}
|
||||
});
|
||||
|
||||
return fetch(modifiedRequest);
|
||||
return fetch(modified);
|
||||
}
|
||||
|
||||
return fetch(request);
|
||||
};
|
||||
|
||||
export const handle = sequence(userGroup, handleAuth, handleLocaleDetection, handleParaglide);
|
||||
|
||||
export const handleError = Sentry.handleErrorWithSentry(() => {
|
||||
const errorId = Sentry.lastEventId() ?? crypto.randomUUID();
|
||||
return { message: 'An unexpected error occurred', errorId };
|
||||
});
|
||||
|
||||
@@ -22,6 +22,8 @@ export type ErrorCode =
|
||||
| 'INVITE_EXHAUSTED'
|
||||
| 'INVITE_REVOKED'
|
||||
| 'INVITE_EXPIRED'
|
||||
| 'GROUP_HAS_ACTIVE_INVITES'
|
||||
| 'GROUP_NOT_FOUND'
|
||||
| 'ANNOTATION_NOT_FOUND'
|
||||
| 'ANNOTATION_UPDATE_FAILED'
|
||||
| 'TRANSCRIPTION_BLOCK_NOT_FOUND'
|
||||
@@ -42,6 +44,8 @@ export type ErrorCode =
|
||||
| 'CIRCULAR_RELATIONSHIP'
|
||||
| 'DUPLICATE_RELATIONSHIP'
|
||||
| 'GESCHICHTE_NOT_FOUND'
|
||||
| 'INVALID_CREDENTIALS'
|
||||
| 'SESSION_EXPIRED'
|
||||
| 'MISSING_CREDENTIALS'
|
||||
| 'UNAUTHORIZED'
|
||||
| 'FORBIDDEN'
|
||||
@@ -108,6 +112,10 @@ export function getErrorMessage(code: ErrorCode | string | undefined): string {
|
||||
return m.error_invite_revoked();
|
||||
case 'INVITE_EXPIRED':
|
||||
return m.error_invite_expired();
|
||||
case 'GROUP_HAS_ACTIVE_INVITES':
|
||||
return m.error_group_has_active_invites();
|
||||
case 'GROUP_NOT_FOUND':
|
||||
return m.error_group_not_found();
|
||||
case 'ANNOTATION_NOT_FOUND':
|
||||
return m.error_annotation_not_found();
|
||||
case 'ANNOTATION_UPDATE_FAILED':
|
||||
@@ -148,6 +156,10 @@ export function getErrorMessage(code: ErrorCode | string | undefined): string {
|
||||
return m.error_duplicate_relationship();
|
||||
case 'GESCHICHTE_NOT_FOUND':
|
||||
return m.error_geschichte_not_found();
|
||||
case 'INVALID_CREDENTIALS':
|
||||
return m.error_invalid_credentials();
|
||||
case 'SESSION_EXPIRED':
|
||||
return m.error_session_expired();
|
||||
case 'MISSING_CREDENTIALS':
|
||||
return m.login_error_missing_credentials();
|
||||
case 'UNAUTHORIZED':
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
<script lang="ts">
|
||||
import { untrack } from 'svelte';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
|
||||
let {
|
||||
groups,
|
||||
selectedGroupIds = []
|
||||
@@ -7,12 +10,13 @@ let {
|
||||
selectedGroupIds?: string[];
|
||||
} = $props();
|
||||
|
||||
let selected = $derived([...selectedGroupIds]);
|
||||
let selected = $state<string[]>(untrack(() => [...selectedGroupIds]));
|
||||
</script>
|
||||
|
||||
<div class="flex flex-wrap gap-3">
|
||||
<fieldset class="flex flex-wrap gap-3 border-none p-0">
|
||||
<legend class="sr-only">{m.admin_new_invite_groups()}</legend>
|
||||
{#each groups as group (group.id)}
|
||||
<label class="inline-flex items-center gap-2 text-sm text-ink-2">
|
||||
<label class="inline-flex min-h-[44px] items-center gap-2 text-sm text-ink-2">
|
||||
<input
|
||||
type="checkbox"
|
||||
name="groupIds"
|
||||
@@ -23,4 +27,4 @@ let selected = $derived([...selectedGroupIds]);
|
||||
{group.name}
|
||||
</label>
|
||||
{/each}
|
||||
</div>
|
||||
</fieldset>
|
||||
|
||||
@@ -1,13 +1,53 @@
|
||||
<script lang="ts">
|
||||
import { page } from '$app/state';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
|
||||
let copied = $state(false);
|
||||
|
||||
function copyId() {
|
||||
const id = page.error?.errorId;
|
||||
if (!id) return;
|
||||
if (!navigator.clipboard) return;
|
||||
navigator.clipboard.writeText(id).then(
|
||||
() => {
|
||||
copied = true;
|
||||
setTimeout(() => (copied = false), 2000);
|
||||
},
|
||||
() => {
|
||||
/* clipboard denied or unavailable — select-all on the <code> element remains */
|
||||
}
|
||||
);
|
||||
}
|
||||
</script>
|
||||
|
||||
<svelte:head>
|
||||
<title>{m.page_title_error()}</title>
|
||||
</svelte:head>
|
||||
|
||||
<div class="px-4 py-12 text-center font-sans">
|
||||
<p class="font-sans text-6xl font-bold text-ink">{page.status}</p>
|
||||
<p class="mt-2 font-sans text-sm text-ink-2">{page.error?.message ?? 'Internal Error'}</p>
|
||||
</div>
|
||||
<main class="px-4 py-12 text-center font-sans">
|
||||
<h1 class="mb-2 font-serif text-2xl font-bold text-ink">{m.page_title_error()}</h1>
|
||||
<p class="mb-8 font-sans text-sm text-ink-2">
|
||||
{page.error?.message ?? m.error_internal_error()}
|
||||
</p>
|
||||
<p class="mb-4 font-mono text-4xl font-bold text-ink">{page.status}</p>
|
||||
|
||||
{#if page.error?.errorId}
|
||||
<div class="mt-6 flex flex-col items-center gap-3">
|
||||
<p class="font-sans text-xs tracking-widest text-ink-2 uppercase">
|
||||
{m.error_page_id_label()}
|
||||
</p>
|
||||
<code
|
||||
class="rounded border border-line bg-surface px-3 py-1 font-mono text-sm text-ink select-all"
|
||||
>
|
||||
{page.error.errorId}
|
||||
</code>
|
||||
<button
|
||||
class="min-h-[44px] min-w-[44px] rounded-sm bg-brand-navy px-5 py-2 font-sans text-sm text-white transition-colors hover:opacity-90 focus-visible:ring-2 focus-visible:ring-brand-navy focus-visible:ring-offset-2"
|
||||
onclick={copyId}
|
||||
aria-label={m.error_copy_id_label()}
|
||||
>
|
||||
<span aria-live="polite">{copied ? m.error_copied() : m.error_copy_id_label()}</span>
|
||||
</button>
|
||||
</div>
|
||||
{/if}
|
||||
</main>
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
<script lang="ts">
|
||||
import { enhance } from '$app/forms';
|
||||
import { beforeNavigate, goto } from '$app/navigation';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
import { createUnsavedWarning } from '$lib/shared/hooks/useUnsavedWarning.svelte';
|
||||
import UnsavedWarningBanner from '$lib/shared/primitives/UnsavedWarningBanner.svelte';
|
||||
|
||||
const availableStandard = $derived([
|
||||
{ value: 'READ_ALL', label: m.admin_perm_read_all() },
|
||||
@@ -18,17 +19,7 @@ const availableAdmin = $derived([
|
||||
|
||||
let { form } = $props();
|
||||
|
||||
let isDirty = $state(false);
|
||||
let showUnsavedWarning = $state(false);
|
||||
let discardTarget: string | null = $state(null);
|
||||
|
||||
beforeNavigate(({ cancel, to }) => {
|
||||
if (isDirty) {
|
||||
cancel();
|
||||
showUnsavedWarning = true;
|
||||
discardTarget = to?.url.href ?? null;
|
||||
}
|
||||
});
|
||||
const unsaved = createUnsavedWarning();
|
||||
</script>
|
||||
|
||||
<div class="flex flex-1 flex-col overflow-hidden">
|
||||
@@ -58,23 +49,8 @@ beforeNavigate(({ cancel, to }) => {
|
||||
|
||||
<!-- Scrollable body -->
|
||||
<div class="flex-1 overflow-y-auto px-5 py-5">
|
||||
{#if showUnsavedWarning}
|
||||
<div
|
||||
class="mb-5 flex items-center justify-between rounded border border-amber-200 bg-amber-50 p-3 text-sm text-amber-800 dark:border-amber-800 dark:bg-amber-950/40 dark:text-amber-300"
|
||||
>
|
||||
<span>{m.admin_unsaved_warning()}</span>
|
||||
<button
|
||||
type="button"
|
||||
onclick={() => {
|
||||
isDirty = false;
|
||||
showUnsavedWarning = false;
|
||||
if (discardTarget) goto(discardTarget);
|
||||
}}
|
||||
class="ml-4 shrink-0 font-sans text-xs font-bold tracking-widest text-amber-800 uppercase hover:text-amber-900 dark:text-amber-300"
|
||||
>
|
||||
{m.person_discard_changes()}
|
||||
</button>
|
||||
</div>
|
||||
{#if unsaved.showUnsavedWarning}
|
||||
<UnsavedWarningBanner onDiscard={unsaved.discard} />
|
||||
{/if}
|
||||
{#if form?.error}
|
||||
<div class="mb-5 rounded border border-red-200 bg-red-50 p-3 text-sm text-red-700">
|
||||
@@ -85,11 +61,11 @@ beforeNavigate(({ cancel, to }) => {
|
||||
<form
|
||||
id="new-group-form"
|
||||
method="POST"
|
||||
use:enhance
|
||||
oninput={() => {
|
||||
isDirty = true;
|
||||
showUnsavedWarning = false;
|
||||
use:enhance={() => async ({ result, update }) => {
|
||||
if (result.type === 'redirect') unsaved.clearOnSuccess();
|
||||
await update();
|
||||
}}
|
||||
oninput={unsaved.markDirty}
|
||||
class="space-y-5"
|
||||
>
|
||||
<!-- Name card -->
|
||||
|
||||
125
frontend/src/routes/admin/groups/new/page.svelte.spec.ts
Normal file
125
frontend/src/routes/admin/groups/new/page.svelte.spec.ts
Normal file
@@ -0,0 +1,125 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { cleanup, render } from 'vitest-browser-svelte';
|
||||
import { page } from 'vitest/browser';
|
||||
import Page from './+page.svelte';
|
||||
|
||||
const enhanceCaptureRef = vi.hoisted(() => ({ submitFn: undefined as unknown }));
|
||||
|
||||
vi.mock('$app/forms', () => ({
|
||||
enhance: (_el: HTMLFormElement, fn?: unknown) => {
|
||||
enhanceCaptureRef.submitFn = fn;
|
||||
return { destroy: vi.fn() };
|
||||
}
|
||||
}));
|
||||
vi.mock('$app/navigation', () => ({ beforeNavigate: vi.fn(), goto: vi.fn() }));
|
||||
|
||||
import { beforeNavigate, goto } from '$app/navigation';
|
||||
|
||||
afterEach(cleanup);
|
||||
|
||||
type SubmitFn = () => Promise<
|
||||
(opts: {
|
||||
result: { type: string; [key: string]: unknown };
|
||||
update: () => Promise<void>;
|
||||
}) => Promise<void>
|
||||
>;
|
||||
|
||||
// ─── Unsaved-changes guard ────────────────────────────────────────────────────
|
||||
|
||||
describe('Admin new group page – unsaved-changes guard', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
enhanceCaptureRef.submitFn = undefined;
|
||||
});
|
||||
|
||||
it('does not show unsaved warning initially', async () => {
|
||||
render(Page, { props: { form: null } });
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('cancels navigation and shows banner when form is dirty', async () => {
|
||||
render(Page, { props: { form: null } });
|
||||
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="name"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
const cancel = vi.fn();
|
||||
callback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
|
||||
|
||||
expect(cancel).toHaveBeenCalled();
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('does not cancel navigation when form is clean', async () => {
|
||||
render(Page, { props: { form: null } });
|
||||
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
const cancel = vi.fn();
|
||||
callback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
|
||||
|
||||
expect(cancel).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('discard button calls goto with the target URL', async () => {
|
||||
render(Page, { props: { form: null } });
|
||||
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="name"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
callback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/groups') } });
|
||||
|
||||
await page.getByRole('button', { name: /verwerfen/i }).click();
|
||||
|
||||
expect(vi.mocked(goto)).toHaveBeenCalledWith('http://localhost/admin/groups');
|
||||
});
|
||||
|
||||
it('clears banner when enhance callback receives a redirect result', async () => {
|
||||
render(Page, { props: { form: null } });
|
||||
const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="name"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/groups') } });
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
|
||||
|
||||
const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
|
||||
await innerFn({
|
||||
result: { type: 'redirect', location: '/admin/groups', status: 303 },
|
||||
update: vi.fn().mockResolvedValue(undefined)
|
||||
});
|
||||
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
|
||||
|
||||
const cancel = vi.fn();
|
||||
navCallback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
|
||||
expect(cancel).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('keeps banner when enhance callback receives a failure result', async () => {
|
||||
render(Page, { props: { form: null } });
|
||||
const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="name"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/groups') } });
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
|
||||
|
||||
const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
|
||||
await innerFn({
|
||||
result: { type: 'failure', status: 400, data: { error: 'Name bereits vergeben' } },
|
||||
update: vi.fn().mockResolvedValue(undefined)
|
||||
});
|
||||
|
||||
const cancel = vi.fn();
|
||||
navCallback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
|
||||
expect(cancel).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
@@ -2,6 +2,7 @@ import { fail } from '@sveltejs/kit';
|
||||
import { env } from '$env/dynamic/private';
|
||||
import { parseBackendError } from '$lib/shared/errors';
|
||||
import type { Actions, PageServerLoad } from './$types';
|
||||
import type { components } from '$lib/generated/api';
|
||||
|
||||
export interface InviteListItem {
|
||||
id: string;
|
||||
@@ -17,22 +18,37 @@ export interface InviteListItem {
|
||||
shareableUrl: string;
|
||||
}
|
||||
|
||||
export type UserGroup = components['schemas']['UserGroup'];
|
||||
|
||||
export const load: PageServerLoad = async ({ url, fetch }) => {
|
||||
const status = url.searchParams.get('status') ?? 'active';
|
||||
const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
|
||||
const res = await fetch(`${apiUrl}/api/invites?status=${encodeURIComponent(status)}`);
|
||||
|
||||
if (!res.ok) {
|
||||
const backendError = await parseBackendError(res);
|
||||
return {
|
||||
invites: [] as InviteListItem[],
|
||||
status,
|
||||
loadError: backendError?.code ?? 'INTERNAL_ERROR'
|
||||
};
|
||||
const [invitesRes, groupsRes] = await Promise.all([
|
||||
fetch(`${apiUrl}/api/invites?status=${encodeURIComponent(status)}`),
|
||||
fetch(`${apiUrl}/api/groups`)
|
||||
]);
|
||||
|
||||
let invites: InviteListItem[] = [];
|
||||
let loadError: string | null = null;
|
||||
if (!invitesRes.ok) {
|
||||
const backendError = await parseBackendError(invitesRes);
|
||||
loadError = backendError?.code ?? 'INTERNAL_ERROR';
|
||||
} else {
|
||||
invites = await invitesRes.json();
|
||||
}
|
||||
|
||||
const invites: InviteListItem[] = await res.json();
|
||||
return { invites, status, loadError: null };
|
||||
let groups: UserGroup[] = [];
|
||||
let groupsLoadError: string | null = null;
|
||||
if (!groupsRes.ok) {
|
||||
const backendError = await parseBackendError(groupsRes);
|
||||
groupsLoadError = backendError?.code ?? 'INTERNAL_ERROR';
|
||||
} else {
|
||||
const raw: UserGroup[] = await groupsRes.json();
|
||||
groups = [...raw].sort((a, b) => a.name.localeCompare(b.name));
|
||||
}
|
||||
|
||||
return { invites, status, loadError, groups, groupsLoadError };
|
||||
};
|
||||
|
||||
export const actions = {
|
||||
@@ -45,6 +61,7 @@ export const actions = {
|
||||
const prefillLastName = (formData.get('prefillLastName') as string) || undefined;
|
||||
const prefillEmail = (formData.get('prefillEmail') as string) || undefined;
|
||||
const expiresAt = (formData.get('expiresAt') as string) || undefined;
|
||||
const groupIds = formData.getAll('groupIds') as string[];
|
||||
|
||||
const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
|
||||
const res = await fetch(`${apiUrl}/api/invites`, {
|
||||
@@ -56,7 +73,8 @@ export const actions = {
|
||||
prefillFirstName,
|
||||
prefillLastName,
|
||||
prefillEmail,
|
||||
expiresAt
|
||||
expiresAt,
|
||||
groupIds
|
||||
})
|
||||
});
|
||||
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
import { enhance } from '$app/forms';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
import { getErrorMessage } from '$lib/shared/errors';
|
||||
import type { InviteListItem } from './+page.server.ts';
|
||||
import UserGroupsSection from '$lib/user/UserGroupsSection.svelte';
|
||||
import type { InviteListItem, UserGroup } from './+page.server.ts';
|
||||
|
||||
let {
|
||||
data,
|
||||
@@ -12,6 +13,8 @@ let {
|
||||
invites: InviteListItem[];
|
||||
status: string;
|
||||
loadError: string | null;
|
||||
groups: UserGroup[];
|
||||
groupsLoadError: string | null;
|
||||
};
|
||||
form?: {
|
||||
createError?: string;
|
||||
@@ -253,6 +256,23 @@ function statusIcon(status: string) {
|
||||
class="block w-full border border-line px-3 py-2 font-serif text-sm text-ink focus:outline-none focus-visible:ring-2 focus-visible:ring-focus-ring"
|
||||
/>
|
||||
</div>
|
||||
<div class="sm:col-span-2">
|
||||
<p class="mb-2 font-sans text-xs font-bold tracking-widest text-ink-3 uppercase">
|
||||
{m.admin_new_invite_groups()}
|
||||
</p>
|
||||
{#if data.groupsLoadError}
|
||||
<div
|
||||
role="alert"
|
||||
class="rounded-sm border border-amber-200 bg-amber-50 px-3 py-2 font-sans text-xs text-amber-700"
|
||||
>
|
||||
{m.admin_invite_groups_load_error()}
|
||||
</div>
|
||||
{:else if data.groups.length === 0}
|
||||
<p class="font-sans text-xs text-ink-3 italic">{m.admin_new_invite_no_groups()}</p>
|
||||
{:else}
|
||||
<UserGroupsSection groups={data.groups} />
|
||||
{/if}
|
||||
</div>
|
||||
{#if form?.createError}
|
||||
<div class="font-sans text-xs font-medium text-red-600 sm:col-span-2">
|
||||
{getErrorMessage(form.createError)}
|
||||
|
||||
155
frontend/src/routes/admin/invites/page.server.test.ts
Normal file
155
frontend/src/routes/admin/invites/page.server.test.ts
Normal file
@@ -0,0 +1,155 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
|
||||
vi.mock('$env/dynamic/private', () => ({
|
||||
env: { API_INTERNAL_URL: 'http://localhost:8080' }
|
||||
}));
|
||||
|
||||
import { load, actions } from './+page.server';
|
||||
import type { UserGroup } from './+page.server';
|
||||
|
||||
// PageServerLoad annotates the return as `void | (...)`. This explicit shape avoids
|
||||
// the void and the Record<string, any> from the generic constraint.
|
||||
type LoadData = {
|
||||
invites: unknown[];
|
||||
status: string;
|
||||
loadError: string | null;
|
||||
groups: UserGroup[];
|
||||
groupsLoadError: string | null;
|
||||
};
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
type AnyFetch = (...args: any[]) => any;
|
||||
|
||||
function mockResponse(ok: boolean, body: unknown, status = 200) {
|
||||
return {
|
||||
ok,
|
||||
status,
|
||||
json: async () => body,
|
||||
text: async () => JSON.stringify(body),
|
||||
headers: new Headers({ 'content-type': 'application/json' })
|
||||
} as unknown as Response;
|
||||
}
|
||||
|
||||
describe('admin/invites load()', () => {
|
||||
const mockFetch = vi.fn<AnyFetch>();
|
||||
|
||||
beforeEach(() => mockFetch.mockReset());
|
||||
|
||||
function event(status = 'active') {
|
||||
return {
|
||||
url: new URL(`http://localhost/admin/invites?status=${status}`),
|
||||
fetch: mockFetch as unknown as typeof fetch
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
} as any;
|
||||
}
|
||||
|
||||
it('returns groups array alongside invites when both succeed', async () => {
|
||||
mockFetch.mockResolvedValueOnce(mockResponse(true, [])).mockResolvedValueOnce(
|
||||
mockResponse(true, [
|
||||
{ id: 'g-1', name: 'Familie', permissions: ['READ_ALL'] },
|
||||
{ id: 'g-2', name: 'Administratoren', permissions: ['ADMIN'] }
|
||||
])
|
||||
);
|
||||
|
||||
const result = (await load(event())) as LoadData;
|
||||
|
||||
expect(result.groups).toHaveLength(2);
|
||||
expect(result.groupsLoadError).toBeNull();
|
||||
});
|
||||
|
||||
it('returns groups sorted alphabetically by name', async () => {
|
||||
mockFetch.mockResolvedValueOnce(mockResponse(true, [])).mockResolvedValueOnce(
|
||||
mockResponse(true, [
|
||||
{ id: 'g-1', name: 'Zebra', permissions: [] },
|
||||
{ id: 'g-2', name: 'Alfa', permissions: [] },
|
||||
{ id: 'g-3', name: 'Mitte', permissions: [] }
|
||||
])
|
||||
);
|
||||
|
||||
const result = (await load(event())) as LoadData;
|
||||
|
||||
expect(result.groups.map((g) => g.name)).toEqual(['Alfa', 'Mitte', 'Zebra']);
|
||||
});
|
||||
|
||||
it('returns groups: [] and non-null groupsLoadError when groups fetch is non-OK', async () => {
|
||||
mockFetch
|
||||
.mockResolvedValueOnce(mockResponse(true, []))
|
||||
.mockResolvedValueOnce(mockResponse(false, { code: 'FORBIDDEN' }, 403));
|
||||
|
||||
const result = (await load(event())) as LoadData;
|
||||
|
||||
expect(result.groups).toEqual([]);
|
||||
expect(result.groupsLoadError).toBe('FORBIDDEN');
|
||||
});
|
||||
|
||||
it('falls back to INTERNAL_ERROR when groups error body has no code', async () => {
|
||||
mockFetch
|
||||
.mockResolvedValueOnce(mockResponse(true, []))
|
||||
.mockResolvedValueOnce(mockResponse(false, null, 500));
|
||||
|
||||
const result = (await load(event())) as LoadData;
|
||||
|
||||
expect(result.groupsLoadError).toBe('INTERNAL_ERROR');
|
||||
});
|
||||
|
||||
it('fetches invites and groups in parallel (both URLs called)', async () => {
|
||||
mockFetch
|
||||
.mockResolvedValueOnce(mockResponse(true, []))
|
||||
.mockResolvedValueOnce(mockResponse(true, []));
|
||||
|
||||
await load(event());
|
||||
|
||||
expect(mockFetch).toHaveBeenCalledTimes(2);
|
||||
expect(mockFetch).toHaveBeenCalledWith(expect.stringContaining('/api/invites'));
|
||||
expect(mockFetch).toHaveBeenCalledWith(expect.stringContaining('/api/groups'));
|
||||
});
|
||||
});
|
||||
|
||||
describe('admin/invites create action', () => {
|
||||
const mockFetch = vi.fn<AnyFetch>();
|
||||
|
||||
beforeEach(() => mockFetch.mockReset());
|
||||
|
||||
const successBody = {
|
||||
id: 'inv-1',
|
||||
code: 'ABCDE12345',
|
||||
displayCode: 'ABCDE-12345',
|
||||
status: 'active',
|
||||
revoked: false,
|
||||
useCount: 0,
|
||||
createdAt: '2026-01-01T00:00:00Z',
|
||||
shareableUrl: 'http://localhost/register?code=ABCDE12345'
|
||||
};
|
||||
|
||||
it('includes groupIds array in POST body when checkboxes are checked', async () => {
|
||||
const fd = new FormData();
|
||||
fd.append('groupIds', 'g-1');
|
||||
fd.append('groupIds', 'g-2');
|
||||
mockFetch.mockResolvedValueOnce(mockResponse(true, successBody, 201));
|
||||
|
||||
await actions.create({
|
||||
request: new Request('http://localhost', { method: 'POST', body: fd }),
|
||||
fetch: mockFetch as unknown as typeof fetch
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
} as any);
|
||||
|
||||
const [, init] = mockFetch.mock.calls[0] as [string, RequestInit];
|
||||
const sent = JSON.parse(init.body as string);
|
||||
expect(sent.groupIds).toEqual(['g-1', 'g-2']);
|
||||
});
|
||||
|
||||
it('sends groupIds: [] when no checkboxes are checked', async () => {
|
||||
const fd = new FormData();
|
||||
mockFetch.mockResolvedValueOnce(mockResponse(true, successBody, 201));
|
||||
|
||||
await actions.create({
|
||||
request: new Request('http://localhost', { method: 'POST', body: fd }),
|
||||
fetch: mockFetch as unknown as typeof fetch
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
} as any);
|
||||
|
||||
const [, init] = mockFetch.mock.calls[0] as [string, RequestInit];
|
||||
const sent = JSON.parse(init.body as string);
|
||||
expect(sent.groupIds).toEqual([]);
|
||||
});
|
||||
});
|
||||
@@ -7,12 +7,15 @@ afterEach(cleanup);
|
||||
|
||||
const makeInvite = (overrides: Record<string, unknown> = {}) => ({
|
||||
id: 'i-1',
|
||||
code: 'XYZ1234567',
|
||||
displayCode: 'XYZ-1234',
|
||||
label: 'Familie',
|
||||
useCount: 0,
|
||||
maxUses: 5,
|
||||
expiresAt: '2027-01-01T00:00:00Z',
|
||||
revoked: false,
|
||||
status: 'active' as string,
|
||||
createdAt: '2025-01-01T00:00:00Z',
|
||||
shareableUrl: 'http://example.com/i/i-1',
|
||||
...overrides
|
||||
});
|
||||
@@ -22,11 +25,15 @@ const baseData = (
|
||||
invites: ReturnType<typeof makeInvite>[];
|
||||
status: string;
|
||||
loadError: string | null;
|
||||
groups: { id: string; name: string; permissions: string[] }[];
|
||||
groupsLoadError: string | null;
|
||||
}> = {}
|
||||
) => ({
|
||||
invites: [],
|
||||
status: 'active',
|
||||
loadError: null,
|
||||
groups: [],
|
||||
groupsLoadError: null,
|
||||
...overrides
|
||||
});
|
||||
|
||||
@@ -253,4 +260,115 @@ describe('admin/invites page', () => {
|
||||
const banner = document.querySelector('.bg-red-50');
|
||||
expect(banner).not.toBeNull();
|
||||
});
|
||||
|
||||
// ─── groups section ───────────────────────────────────────────────────────
|
||||
|
||||
it('shows a groups-load warning banner when data.groupsLoadError is set', async () => {
|
||||
render(AdminInvitesPage, {
|
||||
props: { data: { ...baseData(), groups: [], groupsLoadError: 'INTERNAL_ERROR' } }
|
||||
});
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: /neue einladung/i })
|
||||
.first()
|
||||
.click();
|
||||
|
||||
const banner = document.querySelector('.bg-amber-50');
|
||||
expect(banner).not.toBeNull();
|
||||
});
|
||||
|
||||
it('renders group checkboxes inside the new-invite form when groups are provided', async () => {
|
||||
render(AdminInvitesPage, {
|
||||
props: {
|
||||
data: {
|
||||
...baseData(),
|
||||
groups: [
|
||||
{ id: 'g-1', name: 'Administratoren', permissions: ['ADMIN'] },
|
||||
{ id: 'g-2', name: 'Familie', permissions: ['READ_ALL'] }
|
||||
],
|
||||
groupsLoadError: null
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: /neue einladung/i })
|
||||
.first()
|
||||
.click();
|
||||
|
||||
await expect.element(page.getByRole('checkbox', { name: 'Administratoren' })).toBeVisible();
|
||||
await expect.element(page.getByRole('checkbox', { name: 'Familie' })).toBeVisible();
|
||||
});
|
||||
|
||||
it('group checkbox stays checked after being clicked', async () => {
|
||||
render(AdminInvitesPage, {
|
||||
props: {
|
||||
data: {
|
||||
...baseData(),
|
||||
groups: [{ id: 'g-1', name: 'Familie', permissions: ['READ_ALL'] }],
|
||||
groupsLoadError: null
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: /neue einladung/i })
|
||||
.first()
|
||||
.click();
|
||||
|
||||
const checkbox = page.getByRole('checkbox', { name: 'Familie' });
|
||||
await checkbox.click();
|
||||
await expect.element(checkbox).toBeChecked();
|
||||
});
|
||||
|
||||
it('amber warning banner has role="alert"', async () => {
|
||||
render(AdminInvitesPage, {
|
||||
props: { data: { ...baseData(), groups: [], groupsLoadError: 'INTERNAL_ERROR' } }
|
||||
});
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: /neue einladung/i })
|
||||
.first()
|
||||
.click();
|
||||
|
||||
const alert = document.querySelector('[role="alert"]');
|
||||
expect(alert).not.toBeNull();
|
||||
});
|
||||
|
||||
it('checkbox group fieldset has accessible name from i18n key (not hardcoded German)', async () => {
|
||||
render(AdminInvitesPage, {
|
||||
props: {
|
||||
data: {
|
||||
...baseData(),
|
||||
groups: [{ id: 'g-1', name: 'Familie', permissions: ['READ_ALL'] }],
|
||||
groupsLoadError: null
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: /neue einladung/i })
|
||||
.first()
|
||||
.click();
|
||||
|
||||
// m.admin_new_invite_groups() returns "Gruppen (optional)" in de locale
|
||||
// The hardcoded legend "Gruppen" would not match this accessible name
|
||||
await expect.element(page.getByRole('group', { name: 'Gruppen (optional)' })).toBeVisible();
|
||||
});
|
||||
|
||||
it('shows no checkboxes and no warning when groups list is empty and no error', async () => {
|
||||
render(AdminInvitesPage, {
|
||||
props: { data: { ...baseData(), groups: [], groupsLoadError: null } }
|
||||
});
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: /neue einladung/i })
|
||||
.first()
|
||||
.click();
|
||||
|
||||
expect(document.querySelectorAll('input[name="groupIds"]')).toHaveLength(0);
|
||||
expect(document.querySelector('.bg-amber-50')).toBeNull();
|
||||
// empty-state message visible — "Keine Gruppen vorhanden." in de locale
|
||||
await expect.element(page.getByText(/keine gruppen/i)).toBeVisible();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,19 +1,14 @@
|
||||
<script lang="ts">
|
||||
import { onDestroy } from 'svelte';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
import ImportStatusCard from './ImportStatusCard.svelte';
|
||||
import type { ImportStatus } from './types.js';
|
||||
|
||||
let backfillResult: number | null = $state(null);
|
||||
let backfillLoading = $state(false);
|
||||
let backfillHashesResult: number | null = $state(null);
|
||||
let backfillHashesLoading = $state(false);
|
||||
|
||||
type ImportStatus = {
|
||||
state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED';
|
||||
message: string;
|
||||
processed: number;
|
||||
startedAt: string | null;
|
||||
};
|
||||
|
||||
type ThumbnailStatus = {
|
||||
state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED';
|
||||
message: string;
|
||||
@@ -177,47 +172,7 @@ async function backfillFileHashes() {
|
||||
</div>
|
||||
|
||||
<!-- Mass import -->
|
||||
<div class="rounded-sm border border-line bg-surface p-6 shadow-sm">
|
||||
<h2 class="mb-1 font-sans text-sm font-bold text-ink">{m.admin_system_import_heading()}</h2>
|
||||
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_description()}</p>
|
||||
|
||||
{#if importStatus?.state === 'RUNNING'}
|
||||
<p class="text-sm text-ink-2">{m.admin_system_import_status_running()}</p>
|
||||
{:else if importStatus?.state === 'DONE'}
|
||||
<p class="mb-4 rounded-sm border border-green-200 bg-green-50 p-3 text-sm text-green-700">
|
||||
{m.admin_system_import_status_done({ count: importStatus.processed })}
|
||||
</p>
|
||||
<button
|
||||
data-import-trigger
|
||||
onclick={triggerImport}
|
||||
class="rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
|
||||
>
|
||||
{m.admin_system_import_btn_retry()}
|
||||
</button>
|
||||
{:else if importStatus?.state === 'FAILED'}
|
||||
<p class="mb-4 rounded-sm border border-red-200 bg-red-50 p-3 text-sm text-red-700">
|
||||
{m.admin_system_import_status_failed({ message: importStatus.message })}
|
||||
</p>
|
||||
<button
|
||||
data-import-trigger
|
||||
onclick={triggerImport}
|
||||
class="rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
|
||||
>
|
||||
{m.admin_system_import_btn_retry()}
|
||||
</button>
|
||||
{:else}
|
||||
{#if importStatus !== null}
|
||||
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_status_idle()}</p>
|
||||
{/if}
|
||||
<button
|
||||
data-import-trigger
|
||||
onclick={triggerImport}
|
||||
class="rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
|
||||
>
|
||||
{m.admin_system_import_btn_start()}
|
||||
</button>
|
||||
{/if}
|
||||
</div>
|
||||
<ImportStatusCard importStatus={importStatus} ontrigger={triggerImport} />
|
||||
|
||||
<!-- Thumbnail backfill -->
|
||||
<div class="rounded-sm border border-line bg-surface p-6 shadow-sm">
|
||||
|
||||
81
frontend/src/routes/admin/system/ImportStatusCard.svelte
Normal file
81
frontend/src/routes/admin/system/ImportStatusCard.svelte
Normal file
@@ -0,0 +1,81 @@
|
||||
<script lang="ts">
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
import type { ImportStatus } from './types.js';
|
||||
|
||||
let {
|
||||
importStatus,
|
||||
ontrigger
|
||||
}: {
|
||||
importStatus: ImportStatus | null;
|
||||
ontrigger: () => void;
|
||||
} = $props();
|
||||
|
||||
const failureMessage = $derived(
|
||||
importStatus?.statusCode === 'IMPORT_FAILED_NO_SPREADSHEET'
|
||||
? m.admin_system_import_failed_no_spreadsheet()
|
||||
: m.admin_system_import_failed_internal()
|
||||
);
|
||||
</script>
|
||||
|
||||
<div class="rounded-sm border border-line bg-surface p-6 shadow-sm">
|
||||
<h2 class="mb-5 font-sans text-xs font-bold tracking-widest text-ink-3 uppercase">
|
||||
{m.admin_system_import_heading()}
|
||||
</h2>
|
||||
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_description()}</p>
|
||||
|
||||
{#if importStatus?.state === 'RUNNING'}
|
||||
<div class="mb-4 flex items-center gap-3">
|
||||
<span
|
||||
data-testid="spinner"
|
||||
role="status"
|
||||
aria-label={m.admin_system_import_status_running()}
|
||||
class="inline-block h-5 w-5 animate-spin rounded-full border-2 border-ink-3 border-t-brand-mint motion-reduce:animate-none"
|
||||
></span>
|
||||
<div>
|
||||
<p data-testid="processed-count" class="text-base font-bold text-ink">
|
||||
{importStatus.processed}
|
||||
</p>
|
||||
<p class="font-sans text-xs font-bold tracking-widest text-ink-3 uppercase">
|
||||
{m.admin_system_import_status_running()}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
{:else if importStatus?.state === 'DONE'}
|
||||
<div class="mb-4 rounded-sm border border-green-200 bg-green-50 p-4 text-green-700">
|
||||
<p data-testid="processed-count" class="text-base font-bold">{importStatus.processed}</p>
|
||||
<p class="font-sans text-xs font-bold tracking-widest text-green-800 uppercase">
|
||||
{m.admin_system_import_status_done_label()}
|
||||
</p>
|
||||
<p class="mt-1 text-xs text-green-800">{m.admin_system_import_status_done()}</p>
|
||||
</div>
|
||||
<button
|
||||
data-import-trigger
|
||||
onclick={ontrigger}
|
||||
class="min-h-[44px] rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
|
||||
>
|
||||
{m.admin_system_import_btn_retry()}
|
||||
</button>
|
||||
{:else if importStatus?.state === 'FAILED'}
|
||||
<p class="mb-4 rounded-sm border border-red-200 bg-red-50 p-3 text-sm text-red-700">
|
||||
{failureMessage}
|
||||
</p>
|
||||
<button
|
||||
data-import-trigger
|
||||
onclick={ontrigger}
|
||||
class="min-h-[44px] rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
|
||||
>
|
||||
{m.admin_system_import_btn_retry()}
|
||||
</button>
|
||||
{:else}
|
||||
{#if importStatus !== null}
|
||||
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_status_idle()}</p>
|
||||
{/if}
|
||||
<button
|
||||
data-import-trigger
|
||||
onclick={ontrigger}
|
||||
class="min-h-[44px] rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
|
||||
>
|
||||
{m.admin_system_import_btn_start()}
|
||||
</button>
|
||||
{/if}
|
||||
</div>
|
||||
131
frontend/src/routes/admin/system/ImportStatusCard.svelte.test.ts
Normal file
131
frontend/src/routes/admin/system/ImportStatusCard.svelte.test.ts
Normal file
@@ -0,0 +1,131 @@
|
||||
import { describe, expect, it, vi } from 'vitest';
|
||||
import { render } from 'vitest-browser-svelte';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
import ImportStatusCard from './ImportStatusCard.svelte';
|
||||
import type { ImportStatus } from './types.js';
|
||||
|
||||
const makeStatus = (overrides: Partial<ImportStatus> = {}): ImportStatus => ({
|
||||
state: 'IDLE',
|
||||
statusCode: 'IMPORT_IDLE',
|
||||
processed: 0,
|
||||
startedAt: null,
|
||||
...overrides
|
||||
});
|
||||
|
||||
describe('ImportStatusCard', () => {
|
||||
it('shows spinner while state is RUNNING', async () => {
|
||||
const { getByTestId } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'RUNNING', statusCode: 'IMPORT_RUNNING', processed: 3 }),
|
||||
ontrigger: () => {}
|
||||
}
|
||||
});
|
||||
|
||||
await expect.element(getByTestId('spinner')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('shows processed count at text-base while RUNNING', async () => {
|
||||
const { getByTestId } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'RUNNING', statusCode: 'IMPORT_RUNNING', processed: 7 }),
|
||||
ontrigger: () => {}
|
||||
}
|
||||
});
|
||||
|
||||
await expect.element(getByTestId('processed-count')).toHaveTextContent('7');
|
||||
});
|
||||
|
||||
it('shows processed count while DONE', async () => {
|
||||
const { getByText } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'DONE', statusCode: 'IMPORT_DONE', processed: 42 }),
|
||||
ontrigger: () => {}
|
||||
}
|
||||
});
|
||||
|
||||
await expect.element(getByText('42')).toBeVisible();
|
||||
});
|
||||
|
||||
it('shows no-spreadsheet message when statusCode is IMPORT_FAILED_NO_SPREADSHEET', async () => {
|
||||
const { getByText } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({
|
||||
state: 'FAILED',
|
||||
statusCode: 'IMPORT_FAILED_NO_SPREADSHEET'
|
||||
}),
|
||||
ontrigger: () => {}
|
||||
}
|
||||
});
|
||||
|
||||
await expect.element(getByText(m.admin_system_import_failed_no_spreadsheet())).toBeVisible();
|
||||
});
|
||||
|
||||
it('shows internal error message when statusCode is IMPORT_FAILED_INTERNAL', async () => {
|
||||
const { getByText } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'FAILED', statusCode: 'IMPORT_FAILED_INTERNAL' }),
|
||||
ontrigger: () => {}
|
||||
}
|
||||
});
|
||||
|
||||
await expect.element(getByText(m.admin_system_import_failed_internal())).toBeVisible();
|
||||
});
|
||||
|
||||
it('shows idle text when importStatus is non-null and state is IDLE', async () => {
|
||||
const { getByText } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'IDLE', statusCode: 'IMPORT_IDLE' }),
|
||||
ontrigger: () => {}
|
||||
}
|
||||
});
|
||||
|
||||
await expect.element(getByText(m.admin_system_import_status_idle())).toBeVisible();
|
||||
});
|
||||
|
||||
it('shows no spinner when importStatus is null', async () => {
|
||||
const { getByTestId } = render(ImportStatusCard, {
|
||||
props: { importStatus: null, ontrigger: () => {} }
|
||||
});
|
||||
|
||||
await expect.element(getByTestId('spinner')).not.toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('calls ontrigger when retry button is clicked in DONE state', async () => {
|
||||
const ontrigger = vi.fn();
|
||||
const { getByRole } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'DONE', statusCode: 'IMPORT_DONE', processed: 5 }),
|
||||
ontrigger
|
||||
}
|
||||
});
|
||||
|
||||
await getByRole('button').click();
|
||||
expect(ontrigger).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('calls ontrigger when retry button is clicked in FAILED state', async () => {
|
||||
const ontrigger = vi.fn();
|
||||
const { getByRole } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'FAILED', statusCode: 'IMPORT_FAILED_INTERNAL' }),
|
||||
ontrigger
|
||||
}
|
||||
});
|
||||
|
||||
await getByRole('button').click();
|
||||
expect(ontrigger).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('calls ontrigger when start button is clicked in IDLE state', async () => {
|
||||
const ontrigger = vi.fn();
|
||||
const { getByRole } = render(ImportStatusCard, {
|
||||
props: {
|
||||
importStatus: makeStatus({ state: 'IDLE', statusCode: 'IMPORT_IDLE' }),
|
||||
ontrigger
|
||||
}
|
||||
});
|
||||
|
||||
await getByRole('button').click();
|
||||
expect(ontrigger).toHaveBeenCalledOnce();
|
||||
});
|
||||
});
|
||||
@@ -163,7 +163,7 @@ describe('Admin system page — mass import card', () => {
|
||||
ok: true,
|
||||
json: async () => ({
|
||||
state: 'FAILED',
|
||||
message: 'Datei nicht gefunden.',
|
||||
statusCode: 'IMPORT_FAILED_NO_SPREADSHEET',
|
||||
processed: 0,
|
||||
startedAt: '2026-01-01T10:00:00'
|
||||
})
|
||||
@@ -182,7 +182,7 @@ describe('Admin system page — mass import card', () => {
|
||||
})
|
||||
);
|
||||
render(Page, {});
|
||||
await expect.element(page.getByText(/Datei nicht gefunden/i)).toBeInTheDocument();
|
||||
await expect.element(page.getByText(/Keine Tabellendatei gefunden/i)).toBeInTheDocument();
|
||||
await expect.element(page.getByRole('button', { name: /Erneut starten/i })).toBeInTheDocument();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -246,7 +246,7 @@ describe('admin/system page', () => {
|
||||
return new Response(
|
||||
JSON.stringify({
|
||||
state: 'FAILED',
|
||||
message: 'database error',
|
||||
statusCode: 'IMPORT_FAILED_INTERNAL',
|
||||
processed: 0,
|
||||
startedAt: null
|
||||
}),
|
||||
@@ -262,7 +262,7 @@ describe('admin/system page', () => {
|
||||
render(AdminSystemPage, { props: {} });
|
||||
|
||||
await vi.waitFor(() => {
|
||||
expect(document.body.textContent).toContain('database error');
|
||||
expect(document.body.textContent).toContain('Interner Fehler beim Import');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
6
frontend/src/routes/admin/system/types.ts
Normal file
6
frontend/src/routes/admin/system/types.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
export type ImportStatus = {
|
||||
state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED';
|
||||
statusCode: string;
|
||||
processed: number;
|
||||
startedAt: string | null;
|
||||
};
|
||||
@@ -1,24 +1,15 @@
|
||||
<script lang="ts">
|
||||
import { enhance } from '$app/forms';
|
||||
import { beforeNavigate, goto } from '$app/navigation';
|
||||
import { m } from '$lib/paraglide/messages.js';
|
||||
import UserProfileSection from '$lib/user/UserProfileSection.svelte';
|
||||
import UserGroupsSection from '$lib/user/UserGroupsSection.svelte';
|
||||
import AccountSection from './AccountSection.svelte';
|
||||
import { createUnsavedWarning } from '$lib/shared/hooks/useUnsavedWarning.svelte';
|
||||
import UnsavedWarningBanner from '$lib/shared/primitives/UnsavedWarningBanner.svelte';
|
||||
|
||||
let { data, form } = $props();
|
||||
|
||||
let isDirty = $state(false);
|
||||
let showUnsavedWarning = $state(false);
|
||||
let discardTarget: string | null = $state(null);
|
||||
|
||||
beforeNavigate(({ cancel, to }) => {
|
||||
if (isDirty) {
|
||||
cancel();
|
||||
showUnsavedWarning = true;
|
||||
discardTarget = to?.url.href ?? null;
|
||||
}
|
||||
});
|
||||
const unsaved = createUnsavedWarning();
|
||||
</script>
|
||||
|
||||
<div class="flex flex-1 flex-col overflow-hidden">
|
||||
@@ -44,23 +35,8 @@ beforeNavigate(({ cancel, to }) => {
|
||||
|
||||
<!-- Scrollable body -->
|
||||
<div class="flex-1 overflow-y-auto px-5 py-5">
|
||||
{#if showUnsavedWarning}
|
||||
<div
|
||||
class="mb-5 flex items-center justify-between rounded border border-amber-200 bg-amber-50 p-3 text-sm text-amber-800 dark:border-amber-800 dark:bg-amber-950/40 dark:text-amber-300"
|
||||
>
|
||||
<span>{m.admin_unsaved_warning()}</span>
|
||||
<button
|
||||
type="button"
|
||||
onclick={() => {
|
||||
isDirty = false;
|
||||
showUnsavedWarning = false;
|
||||
if (discardTarget) goto(discardTarget);
|
||||
}}
|
||||
class="ml-4 shrink-0 font-sans text-xs font-bold tracking-widest text-amber-800 uppercase hover:text-amber-900 dark:text-amber-300"
|
||||
>
|
||||
{m.person_discard_changes()}
|
||||
</button>
|
||||
</div>
|
||||
{#if unsaved.showUnsavedWarning}
|
||||
<UnsavedWarningBanner onDiscard={unsaved.discard} />
|
||||
{/if}
|
||||
{#if form?.error}
|
||||
<div class="mb-5 rounded border border-red-200 bg-red-50 p-3 text-sm text-red-700">
|
||||
@@ -71,11 +47,11 @@ beforeNavigate(({ cancel, to }) => {
|
||||
<form
|
||||
id="new-user-form"
|
||||
method="POST"
|
||||
use:enhance
|
||||
oninput={() => {
|
||||
isDirty = true;
|
||||
showUnsavedWarning = false;
|
||||
use:enhance={() => async ({ result, update }) => {
|
||||
if (result.type === 'redirect') unsaved.clearOnSuccess();
|
||||
await update();
|
||||
}}
|
||||
oninput={unsaved.markDirty}
|
||||
class="space-y-5"
|
||||
>
|
||||
<div class="rounded-sm border border-line bg-surface p-5 shadow-sm">
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
import { afterEach, describe, expect, it, vi } from 'vitest';
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { cleanup, render } from 'vitest-browser-svelte';
|
||||
import { page } from 'vitest/browser';
|
||||
import Page from './+page.svelte';
|
||||
|
||||
vi.mock('$app/forms', () => ({ enhance: () => () => {} }));
|
||||
const enhanceCaptureRef = vi.hoisted(() => ({ submitFn: undefined as unknown }));
|
||||
|
||||
vi.mock('$app/forms', () => ({
|
||||
enhance: (_el: HTMLFormElement, fn?: unknown) => {
|
||||
enhanceCaptureRef.submitFn = fn;
|
||||
return { destroy: vi.fn() };
|
||||
}
|
||||
}));
|
||||
vi.mock('$app/navigation', () => ({ beforeNavigate: vi.fn(), goto: vi.fn() }));
|
||||
|
||||
import { beforeNavigate, goto } from '$app/navigation';
|
||||
|
||||
const groups = [
|
||||
{ id: 'g1', name: 'Editoren', permissions: ['WRITE_ALL'] },
|
||||
@@ -20,6 +30,13 @@ const baseData = {
|
||||
|
||||
afterEach(cleanup);
|
||||
|
||||
type SubmitFn = () => Promise<
|
||||
(opts: {
|
||||
result: { type: string; [key: string]: unknown };
|
||||
update: () => Promise<void>;
|
||||
}) => Promise<void>
|
||||
>;
|
||||
|
||||
// ─── Rendering ────────────────────────────────────────────────────────────────
|
||||
|
||||
describe('Admin new user page – rendering', () => {
|
||||
@@ -66,3 +83,103 @@ describe('Admin new user page – error display', () => {
|
||||
await expect.element(page.getByText('Ein Fehler ist aufgetreten.')).not.toBeInTheDocument();
|
||||
});
|
||||
});
|
||||
|
||||
// ─── Unsaved-changes guard ────────────────────────────────────────────────────
|
||||
|
||||
describe('Admin new user page – unsaved-changes guard', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
enhanceCaptureRef.submitFn = undefined;
|
||||
});
|
||||
|
||||
it('does not show unsaved warning initially', async () => {
|
||||
render(Page, { data: baseData, form: null });
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('cancels navigation and shows banner when form is dirty', async () => {
|
||||
render(Page, { data: baseData, form: null });
|
||||
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="email"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
const cancel = vi.fn();
|
||||
callback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
|
||||
|
||||
expect(cancel).toHaveBeenCalled();
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('does not cancel navigation when form is clean', async () => {
|
||||
render(Page, { data: baseData, form: null });
|
||||
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
const cancel = vi.fn();
|
||||
callback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
|
||||
|
||||
expect(cancel).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('discard button calls goto with the target URL', async () => {
|
||||
render(Page, { data: baseData, form: null });
|
||||
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="email"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
callback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/users') } });
|
||||
|
||||
await page.getByRole('button', { name: /verwerfen/i }).click();
|
||||
|
||||
expect(vi.mocked(goto)).toHaveBeenCalledWith('http://localhost/admin/users');
|
||||
});
|
||||
|
||||
it('clears banner when enhance callback receives a redirect result', async () => {
|
||||
render(Page, { data: baseData, form: null });
|
||||
const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="email"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/users') } });
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
|
||||
|
||||
const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
|
||||
await innerFn({
|
||||
result: { type: 'redirect', location: '/admin/users', status: 303 },
|
||||
update: vi.fn().mockResolvedValue(undefined)
|
||||
});
|
||||
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
|
||||
|
||||
const cancel = vi.fn();
|
||||
navCallback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
|
||||
expect(cancel).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('keeps banner when enhance callback receives a failure result', async () => {
|
||||
render(Page, { data: baseData, form: null });
|
||||
const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
|
||||
|
||||
document
|
||||
.querySelector<HTMLInputElement>('input[name="email"]')!
|
||||
.dispatchEvent(new InputEvent('input', { bubbles: true }));
|
||||
|
||||
navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/users') } });
|
||||
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
|
||||
|
||||
const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
|
||||
await innerFn({
|
||||
result: { type: 'failure', status: 400, data: { error: 'E-Mail bereits vergeben' } },
|
||||
update: vi.fn().mockResolvedValue(undefined)
|
||||
});
|
||||
|
||||
const cancel = vi.fn();
|
||||
navCallback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
|
||||
expect(cancel).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -24,7 +24,6 @@ export const GET: RequestHandler = async ({ url, fetch }) => {
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
console.log('Tags Data', data);
|
||||
|
||||
// 4. Daten zurück an den Browser schicken
|
||||
return json(data);
|
||||
|
||||
@@ -4,7 +4,10 @@ import { page as browserPage } from 'vitest/browser';
|
||||
|
||||
const mockPage = {
|
||||
status: 500,
|
||||
error: { message: 'Internal Error' } as { message: string } | null
|
||||
error: { message: 'Internal Error', errorId: undefined } as {
|
||||
message: string;
|
||||
errorId?: string;
|
||||
} | null
|
||||
};
|
||||
|
||||
vi.mock('$app/state', () => ({
|
||||
@@ -13,6 +16,16 @@ vi.mock('$app/state', () => ({
|
||||
}
|
||||
}));
|
||||
|
||||
vi.mock('$lib/paraglide/messages.js', () => ({
|
||||
m: {
|
||||
page_title_error: () => 'Es ist etwas schiefgelaufen.',
|
||||
error_internal_error: () => 'Ein unerwarteter Fehler ist aufgetreten.',
|
||||
error_page_id_label: () => 'Fehler-ID',
|
||||
error_copy_id_label: () => 'ID kopieren',
|
||||
error_copied: () => 'Kopiert!'
|
||||
}
|
||||
}));
|
||||
|
||||
afterEach(cleanup);
|
||||
|
||||
async function loadComponent() {
|
||||
@@ -20,7 +33,7 @@ async function loadComponent() {
|
||||
}
|
||||
|
||||
describe('+error.svelte', () => {
|
||||
it('renders the page status code prominently', async () => {
|
||||
it('renders the page status code', async () => {
|
||||
mockPage.status = 404;
|
||||
mockPage.error = { message: 'Not Found' };
|
||||
|
||||
@@ -40,13 +53,79 @@ describe('+error.svelte', () => {
|
||||
await expect.element(browserPage.getByText('Database unavailable')).toBeVisible();
|
||||
});
|
||||
|
||||
it('falls back to the literal "Internal Error" when page.error is null', async () => {
|
||||
it('falls back to error_internal_error message when page.error is null', async () => {
|
||||
mockPage.status = 500;
|
||||
mockPage.error = null;
|
||||
|
||||
const ErrorPage = await loadComponent();
|
||||
render(ErrorPage);
|
||||
|
||||
await expect.element(browserPage.getByText('Internal Error')).toBeVisible();
|
||||
await expect
|
||||
.element(browserPage.getByText('Ein unerwarteter Fehler ist aufgetreten.'))
|
||||
.toBeVisible();
|
||||
});
|
||||
|
||||
it('shows errorId when page.error.errorId is set', async () => {
|
||||
mockPage.status = 500;
|
||||
mockPage.error = { message: 'Something broke', errorId: 'abc-123-def' };
|
||||
|
||||
const ErrorPage = await loadComponent();
|
||||
render(ErrorPage);
|
||||
|
||||
await expect.element(browserPage.getByText('abc-123-def')).toBeVisible();
|
||||
});
|
||||
|
||||
it('shows copy button when errorId is present', async () => {
|
||||
mockPage.status = 500;
|
||||
mockPage.error = { message: 'Something broke', errorId: 'abc-123-def' };
|
||||
|
||||
const ErrorPage = await loadComponent();
|
||||
render(ErrorPage);
|
||||
|
||||
await expect.element(browserPage.getByRole('button', { name: 'ID kopieren' })).toBeVisible();
|
||||
});
|
||||
|
||||
it('does not render errorId section when errorId is absent', async () => {
|
||||
mockPage.status = 500;
|
||||
mockPage.error = { message: 'Something broke' };
|
||||
|
||||
const ErrorPage = await loadComponent();
|
||||
render(ErrorPage);
|
||||
|
||||
await expect.element(browserPage.getByText('Fehler-ID')).not.toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('shows "Kopiert!" after clicking the copy button', async () => {
|
||||
mockPage.status = 500;
|
||||
mockPage.error = { message: 'Something broke', errorId: 'abc-123-def' };
|
||||
|
||||
Object.defineProperty(navigator, 'clipboard', {
|
||||
value: { writeText: vi.fn().mockResolvedValue(undefined) },
|
||||
configurable: true,
|
||||
writable: true
|
||||
});
|
||||
|
||||
const ErrorPage = await loadComponent();
|
||||
render(ErrorPage);
|
||||
|
||||
await browserPage.getByRole('button', { name: 'ID kopieren' }).click();
|
||||
await expect.element(browserPage.getByText('Kopiert!')).toBeVisible();
|
||||
});
|
||||
|
||||
it('does not show "Kopiert!" when clipboard write is rejected', async () => {
|
||||
mockPage.status = 500;
|
||||
mockPage.error = { message: 'Something broke', errorId: 'abc-123-def' };
|
||||
|
||||
Object.defineProperty(navigator, 'clipboard', {
|
||||
value: { writeText: vi.fn().mockRejectedValue(new Error('denied')) },
|
||||
configurable: true,
|
||||
writable: true
|
||||
});
|
||||
|
||||
const ErrorPage = await loadComponent();
|
||||
render(ErrorPage);
|
||||
|
||||
await browserPage.getByRole('button', { name: 'ID kopieren' }).click();
|
||||
await expect.element(browserPage.getByText('Kopiert!')).not.toBeInTheDocument();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { cleanup, render } from 'vitest-browser-svelte';
|
||||
import { page, userEvent } from 'vitest/browser';
|
||||
import { page } from 'vitest/browser';
|
||||
import { createRawSnippet } from 'svelte';
|
||||
|
||||
vi.mock('$env/static/public', () => ({ PUBLIC_NOTIFICATION_POLL_MS: '60000' }));
|
||||
@@ -96,13 +96,13 @@ describe('Layout – user dropdown', () => {
|
||||
|
||||
it('opens dropdown on button click', async () => {
|
||||
render(Layout, { data: makeData(), children: emptySnippet });
|
||||
await page.getByRole('button', { name: /MM/ }).click();
|
||||
((await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement).click();
|
||||
await expect.element(page.getByRole('link', { name: /Profil/i })).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('profile link points to /profile', async () => {
|
||||
render(Layout, { data: makeData(), children: emptySnippet });
|
||||
await page.getByRole('button', { name: /MM/ }).click();
|
||||
((await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement).click();
|
||||
await expect
|
||||
.element(page.getByRole('link', { name: /Profil/i }))
|
||||
.toHaveAttribute('href', '/profile');
|
||||
@@ -110,16 +110,16 @@ describe('Layout – user dropdown', () => {
|
||||
|
||||
it('logout button is in the dropdown', async () => {
|
||||
render(Layout, { data: makeData(), children: emptySnippet });
|
||||
await page.getByRole('button', { name: /MM/ }).click();
|
||||
((await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement).click();
|
||||
await expect.element(page.getByRole('button', { name: /Abmelden/i })).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('closes dropdown when Escape is pressed', async () => {
|
||||
render(Layout, { data: makeData(), children: emptySnippet });
|
||||
const btn = page.getByRole('button', { name: /MM/ });
|
||||
await btn.click();
|
||||
const btnEl = (await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement;
|
||||
btnEl.click();
|
||||
await expect.element(page.getByRole('link', { name: /Profil/i })).toBeInTheDocument();
|
||||
await userEvent.keyboard('{Escape}');
|
||||
btnEl.dispatchEvent(new KeyboardEvent('keydown', { key: 'Escape', bubbles: true }));
|
||||
await tick();
|
||||
await expect.element(page.getByRole('link', { name: /Profil/i })).not.toBeInTheDocument();
|
||||
});
|
||||
|
||||
@@ -1,12 +1,29 @@
|
||||
import { fail, redirect, type Actions } from '@sveltejs/kit';
|
||||
import { env } from '$env/dynamic/private';
|
||||
import { getErrorMessage } from '$lib/shared/errors';
|
||||
import { getErrorMessage, type ErrorCode } from '$lib/shared/errors';
|
||||
import type { PageServerLoad } from './$types';
|
||||
|
||||
export const load: PageServerLoad = ({ url }) => {
|
||||
return { registered: url.searchParams.get('registered') === '1' };
|
||||
return {
|
||||
registered: url.searchParams.get('registered') === '1',
|
||||
reason: url.searchParams.get('reason')
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* Extracts the fa_session cookie value from a Set-Cookie response header.
|
||||
* The backend may emit attributes like `Path`, `HttpOnly`, `SameSite=Strict`, `Max-Age`, `Secure`;
|
||||
* we only forward the opaque session id — the SvelteKit cookies API will rewrite
|
||||
* the attributes itself.
|
||||
*/
|
||||
function extractFaSessionId(setCookieHeaders: string[]): string | null {
|
||||
for (const header of setCookieHeaders) {
|
||||
const match = header.match(/^fa_session=([^;]+)/);
|
||||
if (match) return match[1];
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export const actions = {
|
||||
login: async ({ request, cookies, fetch, url }) => {
|
||||
const data = await request.formData();
|
||||
@@ -17,44 +34,60 @@ export const actions = {
|
||||
return fail(400, { error: getErrorMessage('MISSING_CREDENTIALS') });
|
||||
}
|
||||
|
||||
const credentials = btoa(`${email}:${password}`);
|
||||
const authHeader = `Basic ${credentials}`;
|
||||
|
||||
// Raw fetch is intentional here: we need to pass an explicit Authorization
|
||||
// header built from the form data, not the cookie-based auth used elsewhere.
|
||||
const baseUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
|
||||
let response: Response;
|
||||
try {
|
||||
const baseUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
|
||||
const response = await fetch(`${baseUrl}/api/users/me`, {
|
||||
method: 'GET',
|
||||
headers: { Authorization: authHeader }
|
||||
});
|
||||
|
||||
if (response.status === 401 || response.status === 403) {
|
||||
return fail(401, { error: getErrorMessage('UNAUTHORIZED') });
|
||||
}
|
||||
|
||||
if (!response.ok) {
|
||||
return fail(500, { error: getErrorMessage('INTERNAL_ERROR') });
|
||||
}
|
||||
|
||||
// The cookie IS the API credential — promoted to `Authorization: Basic …`
|
||||
// on every browser → backend request by AuthTokenCookieFilter on the
|
||||
// Spring side (see #520). It must be Secure on HTTPS or it leaks
|
||||
// a 24h Basic token on plaintext networks. Dev runs over HTTP and
|
||||
// would silently lose the cookie if we hardcoded secure=true.
|
||||
const isHttps = url.protocol === 'https:';
|
||||
cookies.set('auth_token', authHeader, {
|
||||
path: '/',
|
||||
httpOnly: true,
|
||||
sameSite: 'strict',
|
||||
secure: isHttps,
|
||||
maxAge: 60 * 60 * 24
|
||||
response = await fetch(`${baseUrl}/api/auth/login`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ email, password })
|
||||
});
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
console.error('Login request failed', e);
|
||||
return fail(500, { error: getErrorMessage('INTERNAL_ERROR') });
|
||||
}
|
||||
|
||||
if (response.status === 401) {
|
||||
let code: ErrorCode = 'INVALID_CREDENTIALS';
|
||||
try {
|
||||
const body = (await response.json()) as { code?: string };
|
||||
if (body?.code) code = body.code as ErrorCode;
|
||||
} catch {
|
||||
// Body not JSON — fall through to INVALID_CREDENTIALS
|
||||
}
|
||||
return fail(401, { error: getErrorMessage(code) });
|
||||
}
|
||||
|
||||
if (!response.ok) {
|
||||
return fail(response.status, { error: getErrorMessage('INTERNAL_ERROR') });
|
||||
}
|
||||
|
||||
// Extract fa_session id from the Set-Cookie header and re-emit to the browser.
|
||||
// Modern Node/Undici exposes getSetCookie(); fall back to a single header for older runtimes.
|
||||
const setCookieHeaders =
|
||||
typeof response.headers.getSetCookie === 'function'
|
||||
? response.headers.getSetCookie()
|
||||
: response.headers.get('set-cookie')
|
||||
? [response.headers.get('set-cookie')!]
|
||||
: [];
|
||||
const sessionId = extractFaSessionId(setCookieHeaders);
|
||||
if (!sessionId) {
|
||||
console.error('Backend returned 200 OK on login but no fa_session cookie');
|
||||
return fail(500, { error: getErrorMessage('INTERNAL_ERROR') });
|
||||
}
|
||||
|
||||
const isHttps = url.protocol === 'https:';
|
||||
cookies.set('fa_session', sessionId, {
|
||||
path: '/',
|
||||
httpOnly: true,
|
||||
sameSite: 'strict',
|
||||
secure: isHttps,
|
||||
maxAge: 60 * 60 * 8 // 8h — must match backend spring.session.timeout
|
||||
});
|
||||
|
||||
// Best-effort cleanup of the legacy Basic-auth cookie from older deployments.
|
||||
cookies.delete('auth_token', { path: '/' });
|
||||
|
||||
return redirect(303, '/');
|
||||
}
|
||||
} satisfies Actions;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user