Compare commits


2 Commits

Author SHA1 Message Date
Marcel
1ec4815e24 ci: add npm run build step to unit-tests job
Some checks failed
CI / Unit & Component Tests (push) Failing after 5m20s
CI / OCR Service Tests (push) Successful in 51s
CI / Backend Unit Tests (push) Failing after 3m28s
CI / Unit & Component Tests (pull_request) Failing after 4m20s
CI / OCR Service Tests (pull_request) Successful in 52s
CI / Backend Unit Tests (pull_request) Failing after 3m17s
The prerender fix only prevents regression if the build is actually run in
CI. Without this gate, a future prerendered route that becomes unreachable
behind auth would fail silently until someone runs the build manually.

Fits after the test step in the existing unit-tests job — no new job needed
since node_modules is already cached for the Playwright container.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-05-09 08:52:20 +02:00
Marcel
a7bbf2424f fix(build): add prerender entry for /hilfe/transkription
The SvelteKit prerender crawler cannot reach this route because
hooks.server.ts redirects all non-public paths to /login before the
crawler follows links. Explicitly listing the route in kit.prerender.entries
tells SvelteKit to render it directly without crawling.
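A minimal sketch of the resulting config shape, assuming only the options this
message and the CI guard name (the committed svelte.config.js may set more):

// svelte.config.js (sketch)
const config = {
  kit: {
    prerender: {
      crawl: false,                      // per the CI guard: no link-crawling
      entries: ['/hilfe/transkription']  // render this route directly
    }
  }
};
export default config;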

Also removes a misleading comment that claimed the auth hook guards
prerendered static files — it does not. Prerendered HTML is served as a
static file by the reverse proxy; hooks.server.ts only runs for SSR requests.

Closes #472

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-05-09 08:51:45 +02:00
282 changed files with 1324 additions and 23792 deletions

View File

@@ -2,7 +2,6 @@ name: CI
on:
push:
branches: [main]
pull_request:
jobs:
@@ -33,116 +32,21 @@ jobs:
run: npx @inlang/paraglide-js compile --project ./project.inlang --outdir ./src/lib/paraglide
working-directory: frontend
- name: Sync SvelteKit
run: npx svelte-kit sync
working-directory: frontend
- name: Lint
run: npm run lint
working-directory: frontend
- name: Assert no banned vi.mock patterns
shell: bash
run: |
# Literal pdfjs-dist (libLoader pattern — ADR 012)
if grep -rF "vi.mock('pdfjs-dist'" frontend/src/; then
echo "FAIL: banned vi.mock('pdfjs-dist') pattern found — see ADR 012. Use the libLoader prop injection pattern instead."
exit 1
fi
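# For contrast, the allowed shape per ADR 012 (sketch; component and prop
# names are hypothetical): the component receives a loader prop, e.g.
#   <PdfViewer libLoader={() => import('pdfjs-dist')} />
# and tests inject a stub loader instead of vi.mock'ing the module.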
# Async factory with dynamic import in body (named mechanism — ADR 012 / #553).
# Multiline PCRE matches `vi.mock(<arg>, async ... { ... await import(...) ... })`
# across line breaks. __meta__ is excluded because it contains fixture strings
# demonstrating the very pattern this check is meant to forbid.
if grep -rPzln 'vi\.mock\([^)]+,\s*async[^{]*\{[\s\S]*?await\s+import\s*\(' \
--include='*.spec.ts' --include='*.test.ts' \
--exclude-dir='__meta__' \
frontend/src/; then
echo "FAIL: banned async vi.mock factory with dynamic import in body — see ADR 012 / #553. Use a synchronous factory + vi.hoisted instead."
exit 1
fi
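# Allowed replacement per ADR 012 (sketch; the mocked path and names are
# hypothetical):
#   const { parseMock } = vi.hoisted(() => ({ parseMock: vi.fn() }));
#   vi.mock('$lib/pdf/parse', () => ({ parse: parseMock }));
# The factory stays synchronous; vi.hoisted provides the shared handle.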
- name: Assert no (upload|download)-artifact past v3
shell: bash
run: |
# Self-test: verify the regex catches v4+ and does not catch v3.
tmp=$(mktemp)
printf ' uses: actions/upload-artifact@v5\n' > "$tmp"
grep -qP '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' "$tmp" \
|| { echo "FAIL: guard self-test — regex missed upload-artifact@v5"; rm "$tmp"; exit 1; }
printf ' uses: actions/upload-artifact@v3\n' > "$tmp"
grep -qvP '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' "$tmp" \
|| { echo "FAIL: guard self-test — regex incorrectly flagged upload-artifact@v3"; rm "$tmp"; exit 1; }
rm "$tmp"
# Guard: Gitea Actions (act_runner) does not implement the v4 artifact protocol.
# Both upload-artifact and download-artifact share the same incompatibility.
# Pin to @v3. See ADR-014 / #557.
if grep -RPn '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' .gitea/workflows/; then
echo "::error::actions/(upload|download)-artifact@v4+ is unsupported on Gitea Actions (act_runner). Pin to @v3. See ADR-014 / #557."
exit 1
fi
- name: Run unit and component tests with coverage
shell: bash
run: |
set -eo pipefail
npm run test:coverage 2>&1 | tee /tmp/coverage-test-${{ github.run_id }}.log
- name: Run unit and component tests
run: npm test
working-directory: frontend
env:
TZ: Europe/Berlin
# Diagnostic guard: covers the coverage run only. If `npm test` (above)
# exits 1 with a birpc error, the named pattern appears here — not there.
- name: Assert no birpc teardown race in coverage run
shell: bash
if: always()
run: |
if grep -qF "[birpc] rpc is closed" /tmp/coverage-test-${{ github.run_id }}.log 2>/dev/null; then
echo "FAIL: [birpc] rpc is closed teardown race detected in coverage run"
grep -F "[birpc] rpc is closed" /tmp/coverage-test-${{ github.run_id }}.log
exit 1
fi
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- name: Upload coverage reports
if: always()
uses: actions/upload-artifact@v3
with:
name: coverage-reports
path: |
frontend/coverage/
/tmp/coverage-test-${{ github.run_id }}.log
- name: Build frontend
run: npm run build
working-directory: frontend
# ── Prerender output is exactly the public help page ───────────────────
# SvelteKit prerender + crawl follows nav links and bakes "redirect to
# /login" HTML for every protected route, served BEFORE runtime hooks
# (see #514). With `crawl: false` only the explicit entry should land
# in build/prerendered/. Anything else is a regression — fail the build.
- name: Assert prerender output is only /hilfe/transkription
run: |
cd frontend
set -e
extra=$(find build/prerendered -type f \
-not -path 'build/prerendered/hilfe/*' \
-not -name '*.br' -not -name '*.gz' \
|| true)
if [ -n "$extra" ]; then
echo "FAIL: unexpected prerendered files (would shadow runtime hooks):"
echo "$extra"
exit 1
fi
# And the help page must still be there.
test -f build/prerendered/hilfe/transkription.html \
|| { echo "FAIL: /hilfe/transkription.html missing from prerender output"; exit 1; }
echo "PASS: only /hilfe/transkription.html prerendered."
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- name: Upload screenshots
if: always()
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: unit-test-screenshots
path: frontend/test-results/screenshots/
@@ -174,8 +78,6 @@ jobs:
runs-on: ubuntu-latest
env:
DOCKER_API_VERSION: "1.43" # NAS runner runs Docker 24.x (max API 1.43); Testcontainers 2.x defaults to 1.44
DOCKER_HOST: unix:///var/run/docker.sock
TESTCONTAINERS_RYUK_DISABLED: "true"
steps:
- uses: actions/checkout@v4
@@ -195,132 +97,4 @@ jobs:
run: |
chmod +x mvnw
./mvnw clean test
working-directory: backend
- name: Upload surefire reports
if: always()
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
uses: actions/upload-artifact@v3
with:
name: surefire-reports
path: backend/target/surefire-reports/
# ─── fail2ban Regex Regression ────────────────────────────────────────────────
# The filter parses Caddy's JSON access log; a Caddy upgrade that reorders
# the JSON keys would silently break it (fail2ban-regex would return
# "0 matches", fail2ban would stop banning, no error surface). This job
# pins the contract against a deterministic sample line.
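# For orientation, the contract boils down to a failregex of roughly this
# shape (sketch only; infra/fail2ban/filter.d/familienarchiv-auth.conf is
# authoritative):
#   failregex = ^.*"remote_ip":"<HOST>".*"uri":"/api/auth/(login|forgot-password)".*"status":(401|429)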
fail2ban-regex:
name: fail2ban Regex
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install fail2ban
run: |
sudo apt-get update
sudo apt-get install -y fail2ban
- name: Matches /api/auth/login 401
run: |
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/login"},"status":401}' > /tmp/sample.log
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
echo "$out"
echo "$out" | grep -qE '1 matched' \
|| { echo "expected 1 match for /api/auth/login 401"; exit 1; }
- name: Matches /api/auth/login 429
run: |
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/login"},"status":429}' > /tmp/sample.log
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
echo "$out"
echo "$out" | grep -qE '1 matched' \
|| { echo "expected 1 match for /api/auth/login 429"; exit 1; }
- name: Matches /api/auth/forgot-password 401
run: |
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/forgot-password"},"status":401}' > /tmp/sample.log
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
echo "$out"
echo "$out" | grep -qE '1 matched' \
|| { echo "expected 1 match for /api/auth/forgot-password 401"; exit 1; }
- name: Does not match /api/auth/login 200
run: |
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/login"},"status":200}' > /tmp/sample.log
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
echo "$out"
echo "$out" | grep -qE '0 matched' \
|| { echo "expected 0 matches for /api/auth/login 200"; exit 1; }
- name: Does not match /api/documents (unrelated 401)
run: |
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"GET","host":"archiv.raddatz.cloud","uri":"/api/documents"},"status":401}' > /tmp/sample.log
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
echo "$out"
echo "$out" | grep -qE '0 matched' \
|| { echo "expected 0 matches for /api/documents 401"; exit 1; }
# ── Backend resolves to file-polling, not systemd ─────────────────────
# The Debian/Ubuntu fail2ban package ships defaults-debian.conf with
# `[DEFAULT] backend = systemd`. Without `backend = polling` in our
# jail, the daemon loads the jail but reads from journald and never
# touches /var/log/caddy/access.log — i.e. the regex above passes in
# isolation while the live jail is inert. See issue #503.
- name: Jail resolves with polling backend (not inherited systemd)
run: |
sudo ln -sfn "$PWD/infra/fail2ban/jail.d/familienarchiv.conf" /etc/fail2ban/jail.d/familienarchiv.conf
sudo ln -sfn "$PWD/infra/fail2ban/filter.d/familienarchiv-auth.conf" /etc/fail2ban/filter.d/familienarchiv-auth.conf
dump=$(sudo fail2ban-client -d 2>&1)
echo "$dump" | grep -E "add.*familienarchiv-auth" || true
echo "$dump" | grep -qE "\['add', 'familienarchiv-auth', 'polling'\]" \
|| { echo "FAIL: familienarchiv-auth jail did not resolve to 'polling' backend"; exit 1; }
# ─── Compose Bucket-Bootstrap Idempotency ─────────────────────────────────────
# docker-compose.prod.yml's create-buckets service runs on every
# `docker compose up` (one-shot, no restart). Must be idempotent — a
# re-deploy must not fail just because the bucket / user / policy
# already exists. Validated by running create-buckets twice against a
# throwaway minio stack and asserting both invocations exit 0.
compose-idempotency:
name: Compose Bucket Idempotency
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Write stub env file
run: |
cat > .env.test <<'EOF'
TAG=test
PORT_BACKEND=18080
PORT_FRONTEND=13000
APP_DOMAIN=localhost
POSTGRES_PASSWORD=stub
MINIO_PASSWORD=stubrootpassword
MINIO_APP_PASSWORD=stubapppassword
OCR_TRAINING_TOKEN=stub
APP_ADMIN_USERNAME=admin@local
APP_ADMIN_PASSWORD=stub
MAIL_HOST=mailpit
MAIL_PORT=1025
APP_MAIL_FROM=noreply@local
IMPORT_HOST_DIR=/tmp/dummy-import
EOF
- name: Bring up minio
run: |
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test up -d --wait minio
- name: First create-buckets run
run: |
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test run --rm create-buckets
- name: Second create-buckets run (idempotency check)
run: |
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test run --rm create-buckets
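# Why both runs can exit 0: mc's bootstrap commands tolerate existing state.
# A sketch of what create-buckets plausibly runs (commands assumed;
# docker-compose.prod.yml is authoritative):
#   mc alias set local http://minio:9000 root "$MINIO_PASSWORD"
#   mc mb --ignore-existing local/documents
#   mc admin user add local app "$MINIO_APP_PASSWORD"         # upsert
#   mc admin policy attach local readwrite --user app || true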
- name: Teardown
if: always()
run: |
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test down -v
rm -f .env.test
working-directory: backend

View File

@@ -1,65 +0,0 @@
name: Coverage Flake Probe
# Manually-triggered probe for the birpc teardown race documented in ADR 012
# / #553. Runs the full coverage suite 20× in parallel against a single SHA
# and asserts zero `[birpc] rpc is closed` lines across every cell. Verifies
# the acceptance criterion that the race no longer surfaces under coverage.
on:
workflow_dispatch:
jobs:
coverage-flake-probe:
name: Coverage flake probe (run ${{ matrix.run }})
runs-on: ubuntu-latest
container:
image: mcr.microsoft.com/playwright:v1.58.2-noble
strategy:
fail-fast: false
matrix:
run: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
steps:
- uses: actions/checkout@v4
- name: Cache node_modules
id: node-modules-cache
uses: actions/cache@v4
with:
path: frontend/node_modules
key: node-modules-${{ hashFiles('frontend/package-lock.json') }}
- name: Install dependencies
if: steps.node-modules-cache.outputs.cache-hit != 'true'
run: npm ci
working-directory: frontend
- name: Compile Paraglide i18n
run: npx @inlang/paraglide-js compile --project ./project.inlang --outdir ./src/lib/paraglide
working-directory: frontend
- name: Run unit and component tests with coverage
shell: bash
run: |
set -eo pipefail
npm run test:coverage 2>&1 | tee /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log
working-directory: frontend
env:
TZ: Europe/Berlin
- name: Assert no birpc teardown race
shell: bash
if: always()
run: |
if grep -qF "[birpc] rpc is closed" /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log 2>/dev/null; then
echo "FAIL: [birpc] rpc is closed teardown race detected in run ${{ matrix.run }}"
grep -F "[birpc] rpc is closed" /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log
exit 1
fi
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- name: Upload coverage log on failure
if: failure()
uses: actions/upload-artifact@v3
with:
name: coverage-log-run-${{ matrix.run }}
path: /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log

View File

@@ -1,204 +0,0 @@
name: nightly
# Builds and deploys the staging environment from main every night.
# Runs on the self-hosted runner using Docker-out-of-Docker (the docker
# socket is mounted in), so `docker compose build` produces images on
# the host daemon and `docker compose up` consumes them directly — no
# registry hop.
#
# Operational assumptions (see docs/DEPLOYMENT.md §3 for the full setup):
#
# 1. Single-tenant self-hosted runner. The "Write staging env file" step
# writes every secret to .env.staging on the runner filesystem; the
# `if: always()` cleanup step removes it. A multi-tenant runner
# would need to switch to docker compose --env-file <(stdin) instead.
#
# 2. Host docker layer cache is authoritative. There is no
# actions/cache; we rely on the host daemon to keep Maven and npm
# layers warm between runs. A `docker system prune` on the host
# will cause the next nightly build to be cold (5–10 min slower).
#
# Staging environment isolation:
# - project name: archiv-staging
# - host ports: backend 8081, frontend 3001
# - profile: staging (starts mailpit instead of a real SMTP relay)
#
# Required Gitea secrets:
# STAGING_POSTGRES_PASSWORD
# STAGING_MINIO_PASSWORD
# STAGING_MINIO_APP_PASSWORD
# STAGING_OCR_TRAINING_TOKEN
# STAGING_APP_ADMIN_USERNAME
# STAGING_APP_ADMIN_PASSWORD
on:
schedule:
- cron: "0 2 * * *"
workflow_dispatch:
env:
# Ensures the backend Dockerfile's `RUN --mount=type=cache` lines are
# honoured (Maven cache survives between runs).
DOCKER_BUILDKIT: "1"
jobs:
deploy-staging:
# `ubuntu-latest` matches our self-hosted runner's advertised label
# (the runner has labels: ubuntu-latest / ubuntu-24.04 / ubuntu-22.04).
# `self-hosted` would never match — no runner advertises it — so the
# job parks in the queue forever. ADR-011's "single-tenant" promise
# is at the repo level; sharing this runner between CI and deploys
# for the same repo is within that boundary.
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Write staging env file
run: |
cat > .env.staging <<EOF
TAG=nightly
PORT_BACKEND=8081
PORT_FRONTEND=3001
APP_DOMAIN=staging.raddatz.cloud
POSTGRES_PASSWORD=${{ secrets.STAGING_POSTGRES_PASSWORD }}
MINIO_PASSWORD=${{ secrets.STAGING_MINIO_PASSWORD }}
MINIO_APP_PASSWORD=${{ secrets.STAGING_MINIO_APP_PASSWORD }}
OCR_TRAINING_TOKEN=${{ secrets.STAGING_OCR_TRAINING_TOKEN }}
APP_ADMIN_USERNAME=${{ secrets.STAGING_APP_ADMIN_USERNAME }}
APP_ADMIN_PASSWORD=${{ secrets.STAGING_APP_ADMIN_PASSWORD }}
MAIL_HOST=mailpit
MAIL_PORT=1025
MAIL_USERNAME=
MAIL_PASSWORD=
MAIL_SMTP_AUTH=false
MAIL_STARTTLS_ENABLE=false
APP_MAIL_FROM=noreply@staging.raddatz.cloud
IMPORT_HOST_DIR=/srv/familienarchiv-staging/import
EOF
- name: Verify backend /import:ro mount is wired
# Regression guard for #526: the /admin/system mass-import card
# only works when the backend service mounts the host import
# payload at /import (read-only). If a future "compose cleanup"
# PR drops the volumes block, mass import silently breaks again.
# `compose config` renders both shorthand and longform mounts as
# `target: /import` + `read_only: true`, so we assert against
# the rendered form rather than the raw source YAML.
run: |
set -e
docker compose \
-f docker-compose.prod.yml \
-p archiv-staging \
--env-file .env.staging \
--profile staging \
config > /tmp/compose-rendered.yml
grep -q '^[[:space:]]*target: /import$' /tmp/compose-rendered.yml \
|| { echo "::error::backend is missing the /import bind mount (see #526)"; exit 1; }
grep -A2 '^[[:space:]]*target: /import$' /tmp/compose-rendered.yml \
| grep -q 'read_only: true' \
|| { echo "::error::backend /import mount is not read-only (see #526)"; exit 1; }
- name: Build images
# `--pull` forces re-fetching pinned base images so a CVE
# re-publication of the same tag (e.g. node:20.19.0-alpine3.21,
# postgres:16-alpine) is picked up instead of being served
# from the host's stale Docker layer cache.
run: |
docker compose \
-f docker-compose.prod.yml \
-p archiv-staging \
--env-file .env.staging \
--profile staging \
build --pull
- name: Deploy staging
run: |
docker compose \
-f docker-compose.prod.yml \
-p archiv-staging \
--env-file .env.staging \
--profile staging \
up -d --wait --remove-orphans
- name: Reload Caddy
# Apply any committed Caddyfile changes before smoke-testing the
# public surface. Without this step, a Caddyfile edit lands in the
# repo but Caddy keeps serving the previous config until someone
# reloads it manually — the smoke test would then catch a stale
# header or a still-proxied /actuator route rather than confirming
# the current config is live.
#
# The runner executes job steps inside Docker containers (DooD).
# `systemctl` is not present in container images and cannot reach
# the host's systemd directly. We use the Docker socket (mounted
# into every job container via runner-config.yaml) to spin up a
# privileged sibling container in the host PID namespace; nsenter
# then enters the host's namespaces so systemctl talks to the real
# host systemd daemon. No sudoers entry is required — the Docker
# socket already grants root-equivalent host access.
#
# Alpine is used: ~5 MB vs ~70 MB for ubuntu, no unnecessary
# tooling, and the digest is pinned so any upstream change requires
# an explicit bump PR. util-linux (which ships nsenter) is installed
# at run time; apk add takes ~1 s on the warm VPS cache.
#
# `reload` not `restart`: reload sends SIGHUP so Caddy re-reads its
# config in-process without dropping TLS connections. `restart`
# would briefly stop the service, losing in-flight requests.
#
# If Caddy is not running this step fails fast before the smoke test
# issues a misleading "port 443 refused" error.
run: |
docker run --rm --privileged --pid=host \
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
- name: Smoke test deployed environment
# Healthchecks confirm containers are healthy; they do NOT confirm the
# public surface works. This step catches: Caddy not reloaded, HSTS
# header dropped, /actuator block bypassed.
#
# --resolve pins staging.raddatz.cloud to the Docker bridge gateway IP
# (the host) so we do NOT depend on hairpin NAT on the host router.
# 127.0.0.1 cannot be used: job containers run in bridge network mode
# (runner-config.yaml), so 127.0.0.1 is the container's loopback, not
# the host's. The bridge gateway IS the host; Caddy binds 0.0.0.0:443
# and is therefore reachable from the container via that IP.
# SNI still uses the public hostname so the TLS cert validates correctly.
#
# Gateway detection reads /proc/net/route (always present, no package
# required) instead of `ip route` to avoid a dependency on iproute2.
# Field $2=="00000000" is the default route; field $3 is the gateway as
# a little-endian 32-bit hex value which awk decodes to dotted-decimal.
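# Worked example: h="0101A8C0" decodes byte-reversed to 192.168.1.1
# (C0→192, A8→168, 01→1, 01→1).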
run: |
set -e
HOST="staging.raddatz.cloud"
URL="https://$HOST"
HOST_IP=$(awk 'NR>1 && $2=="00000000"{h=$3;printf "%d.%d.%d.%d\n",strtonum("0x"substr(h,7,2)),strtonum("0x"substr(h,5,2)),strtonum("0x"substr(h,3,2)),strtonum("0x"substr(h,1,2));exit}' /proc/net/route)
[ -n "$HOST_IP" ] || { echo "ERROR: could not detect Docker bridge gateway via /proc/net/route"; exit 1; }
RESOLVE="--resolve $HOST:443:$HOST_IP"
echo "Smoke test: $URL (pinned to $HOST_IP via bridge gateway)"
curl -fsS "$RESOLVE" --max-time 10 "$URL/login" -o /dev/null
# Pin the preload-list-eligible HSTS value, not just header presence:
# a degraded `max-age=1` or a dropped `includeSubDomains; preload` must
# fail this check rather than pass it silently.
curl -fsS "$RESOLVE" --max-time 10 -I "$URL/" \
| grep -Eqi 'strict-transport-security:[[:space:]]*max-age=31536000.*includeSubDomains.*preload'
# Permissions-Policy denies APIs the app does not use (camera,
# microphone, geolocation). A regression that loosens or drops the
# header now fails the smoke step.
curl -fsS "$RESOLVE" --max-time 10 -I "$URL/" \
| grep -Eqi 'permissions-policy:[[:space:]]*camera=\(\),[[:space:]]*microphone=\(\),[[:space:]]*geolocation=\(\)'
status=$(curl -s "$RESOLVE" -o /dev/null -w "%{http_code}" --max-time 10 "$URL/actuator/health")
[ "$status" = "404" ] || { echo "expected 404 from /actuator/health, got $status"; exit 1; }
echo "All smoke checks passed"
- name: Cleanup env file
# LOAD-BEARING: `if: always()` is the linchpin of the ADR-011
# single-tenant runner trust model. Every secret in .env.staging
# is plain text on the runner filesystem until this step runs.
# If a future refactor drops `if: always()`, a failed deploy
# leaves the env-file behind. Do not remove this conditional
# without first re-evaluating ADR-011.
if: always()
run: rm -f .env.staging

View File

@@ -1,143 +0,0 @@
name: release
# Builds and deploys the production environment on `v*` tag push.
# Runs on the self-hosted runner via Docker-out-of-Docker; images are
# tagged with the actual git tag (e.g. v1.0.0) so rollback is
# `TAG=<previous> docker compose -f docker-compose.prod.yml -p archiv-production up -d --wait`
#
# Operational assumptions (see docs/DEPLOYMENT.md §3 for the full setup):
#
# 1. Single-tenant self-hosted runner. The "Write production env file"
# step writes every secret to .env.production on the runner
# filesystem; the `if: always()` cleanup step removes it. A
# multi-tenant runner would need to switch to
# `docker compose --env-file <(stdin)` instead.
#
# 2. Host docker layer cache is authoritative. There is no
# actions/cache; we rely on the host daemon to keep Maven and npm
# layers warm between runs. A `docker system prune` on the host
# will cause the next release build to be cold (5–10 min slower).
#
# Production environment:
# - project name: archiv-production
# - host ports: backend 8080, frontend 3000
# - profile: (none) — mailpit is excluded; real SMTP relay is used
#
# Required Gitea secrets:
# PROD_POSTGRES_PASSWORD
# PROD_MINIO_PASSWORD
# PROD_MINIO_APP_PASSWORD
# PROD_OCR_TRAINING_TOKEN
# PROD_APP_ADMIN_USERNAME (CRITICAL: see docs/DEPLOYMENT.md)
# PROD_APP_ADMIN_PASSWORD (CRITICAL: locked in on first deploy)
# MAIL_HOST
# MAIL_PORT
# MAIL_USERNAME
# MAIL_PASSWORD
on:
push:
tags:
- "v*"
env:
DOCKER_BUILDKIT: "1"
jobs:
deploy-production:
# See nightly.yml — same rationale: `ubuntu-latest` matches the
# advertised label of our single-tenant self-hosted runner.
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Write production env file
run: |
cat > .env.production <<EOF
TAG=${{ gitea.ref_name }}
PORT_BACKEND=8080
PORT_FRONTEND=3000
APP_DOMAIN=archiv.raddatz.cloud
POSTGRES_PASSWORD=${{ secrets.PROD_POSTGRES_PASSWORD }}
MINIO_PASSWORD=${{ secrets.PROD_MINIO_PASSWORD }}
MINIO_APP_PASSWORD=${{ secrets.PROD_MINIO_APP_PASSWORD }}
OCR_TRAINING_TOKEN=${{ secrets.PROD_OCR_TRAINING_TOKEN }}
APP_ADMIN_USERNAME=${{ secrets.PROD_APP_ADMIN_USERNAME }}
APP_ADMIN_PASSWORD=${{ secrets.PROD_APP_ADMIN_PASSWORD }}
MAIL_HOST=${{ secrets.MAIL_HOST }}
MAIL_PORT=${{ secrets.MAIL_PORT }}
MAIL_USERNAME=${{ secrets.MAIL_USERNAME }}
MAIL_PASSWORD=${{ secrets.MAIL_PASSWORD }}
MAIL_SMTP_AUTH=true
MAIL_STARTTLS_ENABLE=true
APP_MAIL_FROM=noreply@raddatz.cloud
IMPORT_HOST_DIR=/srv/familienarchiv-production/import
EOF
- name: Build images
# `--pull` forces re-fetching pinned base images so a CVE
# re-publication of the same tag is picked up rather than served
# from the host's stale Docker layer cache.
run: |
docker compose \
-f docker-compose.prod.yml \
-p archiv-production \
--env-file .env.production \
build --pull
- name: Deploy production
run: |
docker compose \
-f docker-compose.prod.yml \
-p archiv-production \
--env-file .env.production \
up -d --wait --remove-orphans
- name: Reload Caddy
# See nightly.yml — same rationale and mechanism: DooD job containers
# cannot call systemctl directly; nsenter via a privileged sibling
# container reaches the host systemd. Must run after deploy (so the
# latest Caddyfile is on disk) and before the smoke test (so the
# public surface reflects the current config). Alpine with pinned
# digest; reload not restart — see nightly.yml for full rationale.
run: |
docker run --rm --privileged --pid=host \
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
- name: Smoke test deployed environment
# See nightly.yml — same three checks, against the prod vhost.
# --resolve pins to the bridge gateway IP (the host), not 127.0.0.1
# — see nightly.yml for the full network topology explanation.
run: |
set -e
HOST="archiv.raddatz.cloud"
URL="https://$HOST"
HOST_IP=$(awk 'NR>1 && $2=="00000000"{h=$3;printf "%d.%d.%d.%d\n",strtonum("0x"substr(h,7,2)),strtonum("0x"substr(h,5,2)),strtonum("0x"substr(h,3,2)),strtonum("0x"substr(h,1,2));exit}' /proc/net/route)
[ -n "$HOST_IP" ] || { echo "ERROR: could not detect Docker bridge gateway via /proc/net/route"; exit 1; }
RESOLVE="--resolve $HOST:443:$HOST_IP"
echo "Smoke test: $URL (pinned to $HOST_IP via bridge gateway)"
curl -fsS "$RESOLVE" --max-time 10 "$URL/login" -o /dev/null
# Pin the preload-list-eligible HSTS value, not just header presence:
# a degraded `max-age=1` or a dropped `includeSubDomains; preload` must
# fail this check rather than pass it silently.
curl -fsS "$RESOLVE" --max-time 10 -I "$URL/" \
| grep -Eqi 'strict-transport-security:[[:space:]]*max-age=31536000.*includeSubDomains.*preload'
# Permissions-Policy denies APIs the app does not use (camera,
# microphone, geolocation). A regression that loosens or drops the
# header now fails the smoke step.
curl -fsS "$RESOLVE" --max-time 10 -I "$URL/" \
| grep -Eqi 'permissions-policy:[[:space:]]*camera=\(\),[[:space:]]*microphone=\(\),[[:space:]]*geolocation=\(\)'
status=$(curl -s "$RESOLVE" -o /dev/null -w "%{http_code}" --max-time 10 "$URL/actuator/health")
[ "$status" = "404" ] || { echo "expected 404 from /actuator/health, got $status"; exit 1; }
echo "All smoke checks passed"
- name: Cleanup env file
# LOAD-BEARING: `if: always()` is the linchpin of the ADR-011
# single-tenant runner trust model. Every secret in
# .env.production is plain text on the runner filesystem until
# this step runs. If a future refactor drops `if: always()`, a
# failed deploy leaves the env-file behind. Do not remove this
# conditional without first re-evaluating ADR-011.
if: always()
run: rm -f .env.production

View File

@@ -159,7 +159,7 @@ Input DTOs live flat in the domain package. Response types are the model entitie
→ See [CONTRIBUTING.md §Error handling](./CONTRIBUTING.md#error-handling)
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) add to `ErrorCode` type in `frontend/src/lib/shared/errors.ts`, (3) add a `case` in `getErrorMessage()`, (4) add i18n keys in `messages/{de,en,es}.json`.
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) mirror in `frontend/src/lib/shared/errors.ts`, (3) add i18n keys in `messages/{de,en,es}.json`.
### Security / Permissions
@@ -202,7 +202,8 @@ frontend/src/routes/
├── profile/ User profile settings
├── users/[id]/ Public user profile page
├── login/ logout/ register/
└── forgot-password/ reset-password/
├── forgot-password/ reset-password/
└── demo/ Dev-only demos
```
### API Client Pattern

View File

@@ -273,16 +273,6 @@
</profiles>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
<systemPropertyVariables>
<junit.jupiter.execution.timeout.default>90 s</junit.jupiter.execution.timeout.default>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
</build>

View File

@@ -100,45 +100,7 @@ public interface DocumentRepository extends JpaRepository<Document, UUID>, JpaSp
ORDER BY ts_rank(d.search_vector, q.pq) DESC,
d.meta_date DESC NULLS LAST
""")
// Unpaged path — for bulk-edit "select all" and density chart
List<UUID> findAllMatchingIdsByFts(@Param("query") String query);
/**
* Returns one page of FTS-ranked document IDs with the total match count.
*
* <p>Each row contains (in column order):
* <ol>
* <li>UUID — document id</li>
* <li>double — ts_rank score</li>
* <li>long — COUNT(*) OVER () — full match count, not page count</li>
* </ol>
*
* <p>Returns an empty list when the query matches no documents (including
* stopword-only queries where websearch_to_tsquery returns an empty tsquery).
* Use findAllMatchingIdsByFts for the unpaged bulk-edit path.
*/
@Query(nativeQuery = true, value = """
WITH q AS (
SELECT CASE WHEN websearch_to_tsquery('german', :query)::text <> ''
THEN to_tsquery('simple', regexp_replace(
websearch_to_tsquery('german', :query)::text,
'''([^'']+)''',
'''\\1'':*',
'g'))
END AS pq
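-- e.g. for 'Brief 1920': the websearch text form 'brief' & '1920'
-- becomes pq = 'brief':* & '1920':* (a prefix-matching tsquery).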
), matches AS (
SELECT d.id, ts_rank(d.search_vector, q.pq) AS rank
FROM documents d, q
WHERE d.search_vector @@ q.pq
)
SELECT id, rank, COUNT(*) OVER () AS total
FROM matches
ORDER BY rank DESC, id
OFFSET :offset LIMIT :limit
""")
List<Object[]> findFtsPageRaw(@Param("query") String query,
@Param("offset") int offset,
@Param("limit") int limit);
List<UUID> findRankedIdsByFts(@Param("query") String query);
/**
* Returns match-enrichment data for a set of documents identified by their IDs.

View File

@@ -162,7 +162,7 @@ public class DocumentService {
*/
private List<UUID> resolveFtsIds(String text) {
if (!StringUtils.hasText(text)) return null;
return documentRepository.findAllMatchingIdsByFts(text);
return documentRepository.findRankedIdsByFts(text);
}
/** Loads matching documents and projects to non-null {@link LocalDate}s. */
@@ -485,7 +485,7 @@ public class DocumentService {
boolean hasText = StringUtils.hasText(text);
List<UUID> rankedIds = null;
if (hasText) {
rankedIds = documentRepository.findAllMatchingIdsByFts(text);
rankedIds = documentRepository.findRankedIdsByFts(text);
if (rankedIds.isEmpty()) return List.of();
}
@@ -645,43 +645,39 @@ public class DocumentService {
// 1. General search (for the search field in the frontend)
public DocumentSearchResult searchDocuments(String text, LocalDate from, LocalDate to, UUID sender, UUID receiver, List<String> tags, String tagQ, DocumentStatus status, DocumentSort sort, String dir, TagOperator tagOperator, Pageable pageable) {
boolean hasText = StringUtils.hasText(text);
// Pure-text RELEVANCE: push pagination into SQL — skip findAllMatchingIdsByFts entirely (ADR-008).
if (isPureTextRelevance(hasText, sort, from, to, sender, receiver, tags, tagQ, status)) {
return relevanceSortedPageFromSql(text, pageable);
}
List<UUID> rankedIds = null;
if (hasText) {
rankedIds = documentRepository.findAllMatchingIdsByFts(text);
rankedIds = documentRepository.findRankedIdsByFts(text);
if (rankedIds.isEmpty()) return DocumentSearchResult.of(List.of());
}
Specification<Document> spec = buildSearchSpec(
hasText, rankedIds, from, to, sender, receiver, tags, tagQ, status, tagOperator);
// SENDER and RECEIVER sorts load the full match set and slice in-memory.
// SENDER, RECEIVER and RELEVANCE sorts load the full match set and slice in memory.
// JPA's Sort.by("sender.lastName") generates an INNER JOIN that silently drops
// documents with null sender/receivers. Cost scales with match count —
// acceptable while documents stays under ~10k rows. (ADR-008)
// documents with null sender/receivers; RELEVANCE maps a DB order to an external
// rank list. Cost scales linearly with match count — acceptable while documents
// stays under ~10k rows. Past that, replace with SQL-level LEFT JOIN sort.
if (sort == DocumentSort.RECEIVER) {
// In-memory sort on page slice (≤ page size rows) — acceptable
List<Document> sorted = sortByFirstReceiver(documentRepository.findAll(spec), dir);
return buildResultPaged(pageSlice(sorted, pageable), text, pageable, sorted.size());
}
if (sort == DocumentSort.SENDER) {
// In-memory sort on page slice (≤ page size rows) — acceptable
List<Document> sorted = sortBySender(documentRepository.findAll(spec), dir);
return buildResultPaged(pageSlice(sorted, pageable), text, pageable, sorted.size());
}
// RELEVANCE with active filters: load filtered subset and sort in-memory by rank.
// RELEVANCE: default when text present and no explicit sort given
boolean useRankOrder = hasText && (sort == null || sort == DocumentSort.RELEVANCE);
if (useRankOrder) {
List<Document> results = documentRepository.findAll(spec);
Map<UUID, Integer> rankMap = new HashMap<>();
for (int i = 0; i < rankedIds.size(); i++) rankMap.put(rankedIds.get(i), i);
List<Document> sorted = documentRepository.findAll(spec).stream()
.sorted(Comparator.comparingInt(doc -> rankMap.getOrDefault(doc.getId(), Integer.MAX_VALUE)))
List<Document> sorted = results.stream()
.sorted(Comparator.comparingInt(
doc -> rankMap.getOrDefault(doc.getId(), Integer.MAX_VALUE)))
.toList();
return buildResultPaged(pageSlice(sorted, pageable), text, pageable, sorted.size());
}
@@ -692,39 +688,6 @@ public class DocumentService {
return buildResultPaged(page.getContent(), text, pageable, page.getTotalElements());
}
private static boolean isPureTextRelevance(boolean hasText, DocumentSort sort,
LocalDate from, LocalDate to, UUID sender, UUID receiver,
List<String> tags, String tagQ, DocumentStatus status) {
return hasText && (sort == null || sort == DocumentSort.RELEVANCE)
&& from == null && to == null && sender == null && receiver == null
&& (tags == null || tags.isEmpty()) && (tagQ == null || tagQ.isBlank()) && status == null;
}
/**
* Pure-text RELEVANCE path — pagination and ts_rank ordering pushed into SQL.
* Called when no non-text filters are active (ADR-008).
*/
private DocumentSearchResult relevanceSortedPageFromSql(String text, Pageable pageable) {
long rawOffset = pageable.getOffset();
if (rawOffset > Integer.MAX_VALUE) return DocumentSearchResult.of(List.of());
int offset = (int) rawOffset;
int limit = pageable.getPageSize();
FtsPage ftsPage = toFtsPage(documentRepository.findFtsPageRaw(text, offset, limit));
if (ftsPage.hits().isEmpty()) return DocumentSearchResult.of(List.of());
// Preserve ts_rank order from SQL across the JPA findAllById call.
Map<UUID, Integer> rankMap = new HashMap<>();
List<UUID> pageIds = new ArrayList<>();
for (int i = 0; i < ftsPage.hits().size(); i++) {
rankMap.put(ftsPage.hits().get(i).id(), i);
pageIds.add(ftsPage.hits().get(i).id());
}
List<Document> docs = documentRepository.findAllById(pageIds).stream()
.sorted(Comparator.comparingInt(d -> rankMap.getOrDefault(d.getId(), Integer.MAX_VALUE)))
.toList();
return buildResultPaged(docs, text, pageable, ftsPage.total());
}
private static <T> List<T> pageSlice(List<T> sorted, Pageable pageable) {
int from = Math.min((int) pageable.getOffset(), sorted.size());
int to = Math.min(from + pageable.getPageSize(), sorted.size());
@@ -1050,28 +1013,6 @@ public class DocumentService {
return result;
}
private static final int COL_ID = 0;
private static final int COL_RANK = 1;
private static final int COL_TOTAL = 2;
/**
* Maps raw Object[] rows from {@link DocumentRepository#findFtsPageRaw} to an
* {@link FtsPage}. Uses pattern-matching UUID cast to guard against driver-level
* type variance (some JDBC drivers return UUID as String).
*/
private static FtsPage toFtsPage(List<Object[]> rows) {
if (rows.isEmpty()) return new FtsPage(List.of(), 0);
long total = ((Number) rows.get(0)[COL_TOTAL]).longValue();
List<FtsHit> hits = rows.stream()
.map(r -> {
UUID id = r[COL_ID] instanceof UUID u ? u : UUID.fromString(r[COL_ID].toString());
double rank = ((Number) r[COL_RANK]).doubleValue();
return new FtsHit(id, rank);
})
.toList();
return new FtsPage(hits, total);
}
/** Clean text + highlight offsets parsed from a {@code ts_headline} sentinel-delimited string. */
public record ParsedHighlight(String cleanText, List<MatchOffset> offsets) {}

View File

@@ -1,6 +0,0 @@
package org.raddatz.familienarchiv.document;
import java.util.UUID;
/** A single document hit from a paginated FTS query — id and its ts_rank score. */
record FtsHit(UUID id, double rank) {}

View File

@@ -1,6 +0,0 @@
package org.raddatz.familienarchiv.document;
import java.util.List;
/** One page of FTS results — the ranked hit list for this page and the total match count. */
record FtsPage(List<FtsHit> hits, long total) {}

View File

@@ -27,9 +27,7 @@ public class CommentController {
// ─── Block (transcription) comments ────────────────────────────────────────
@GetMapping("/api/documents/{documentId}/transcription-blocks/{blockId}/comments")
public List<DocumentComment> getBlockComments(
@PathVariable UUID documentId,
@PathVariable UUID blockId) {
public List<DocumentComment> getBlockComments(@PathVariable UUID blockId) {
return commentService.getCommentsForBlock(blockId);
}
@@ -50,7 +48,6 @@ public class CommentController {
@RequirePermission({Permission.ANNOTATE_ALL, Permission.WRITE_ALL})
public DocumentComment replyToBlockComment(
@PathVariable UUID documentId,
@PathVariable UUID blockId,
@PathVariable UUID commentId,
@RequestBody CreateCommentDTO dto,
Authentication authentication) {

View File

@@ -30,8 +30,6 @@ public enum ErrorCode {
// --- Users ---
/** A user with the given ID or username does not exist. 404 */
USER_NOT_FOUND,
/** A group with the given ID does not exist. 404 */
GROUP_NOT_FOUND,
/** The supplied email address is already used by another account. 409 */
EMAIL_ALREADY_IN_USE,
/** The supplied current password does not match the stored hash. 400 */
@@ -54,8 +52,6 @@ public enum ErrorCode {
INVITE_REVOKED,
/** The invite has passed its expiry date. 410 */
INVITE_EXPIRED,
/** A group cannot be deleted because one or more active invites reference it. 409 */
GROUP_HAS_ACTIVE_INVITES,
// --- Auth ---
/** The request is not authenticated. 401 */

View File

@@ -1,6 +1,5 @@
package org.raddatz.familienarchiv.importing;
import com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.poi.ss.usermodel.*;
@@ -53,9 +52,9 @@ public class MassImportService {
public enum State { IDLE, RUNNING, DONE, FAILED }
public record ImportStatus(State state, String statusCode, @JsonIgnore String message, int processed, LocalDateTime startedAt) {}
public record ImportStatus(State state, String message, int processed, LocalDateTime startedAt) {}
private volatile ImportStatus currentStatus = new ImportStatus(State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
private volatile ImportStatus currentStatus = new ImportStatus(State.IDLE, "Kein Import gestartet.", 0, null);
public ImportStatus getStatus() {
return currentStatus;
@@ -100,9 +99,7 @@ public class MassImportService {
@Value("${app.import.col.transcription:13}")
private int colTranscription;
@Value("${app.import.dir:/import}")
private String importDir;
private static final String IMPORT_DIR = "/import";
private static final DateTimeFormatter GERMAN_DATE = DateTimeFormatter.ofPattern("d. MMMM yyyy", Locale.GERMAN);
// ODS XML namespaces
@@ -117,39 +114,30 @@ public class MassImportService {
if (currentStatus.state() == State.RUNNING) {
throw DomainException.conflict(ErrorCode.IMPORT_ALREADY_RUNNING, "A mass import is already in progress");
}
currentStatus = new ImportStatus(State.RUNNING, "IMPORT_RUNNING", "Import läuft...", 0, LocalDateTime.now());
currentStatus = new ImportStatus(State.RUNNING, "Import läuft...", 0, LocalDateTime.now());
try {
File spreadsheet = findSpreadsheetFile();
log.info("Starte Massenimport aus: {}", spreadsheet.getAbsolutePath());
int processed = processRows(readSpreadsheet(spreadsheet));
currentStatus = new ImportStatus(State.DONE, "IMPORT_DONE",
currentStatus = new ImportStatus(State.DONE,
"Import abgeschlossen. " + processed + " Dokumente verarbeitet.",
processed, currentStatus.startedAt());
} catch (NoSpreadsheetException e) {
log.error("Massenimport fehlgeschlagen: keine Tabellendatei", e);
currentStatus = new ImportStatus(State.FAILED, "IMPORT_FAILED_NO_SPREADSHEET",
"Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
} catch (Exception e) {
log.error("Massenimport fehlgeschlagen", e);
currentStatus = new ImportStatus(State.FAILED, "IMPORT_FAILED_INTERNAL",
"Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
currentStatus = new ImportStatus(State.FAILED, "Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
}
}
private static class NoSpreadsheetException extends RuntimeException {
NoSpreadsheetException(String message) { super(message); }
}
private File findSpreadsheetFile() throws IOException {
try (Stream<Path> files = Files.list(Paths.get(importDir))) {
try (Stream<Path> files = Files.list(Paths.get(IMPORT_DIR))) {
return files
.filter(p -> {
String name = p.toString().toLowerCase();
return name.endsWith(".ods") || name.endsWith(".xlsx") || name.endsWith(".xls");
})
.findFirst()
.orElseThrow(() -> new NoSpreadsheetException(
"Keine Tabellendatei (.ods/.xlsx/.xls) in " + importDir + " gefunden!"))
.orElseThrow(() -> new RuntimeException(
"Keine Tabellendatei (.ods/.xlsx/.xls) in " + IMPORT_DIR + " gefunden!"))
.toFile();
}
}
@@ -390,7 +378,7 @@ public class MassImportService {
}
private Optional<File> findFileRecursive(String filename) {
try (Stream<Path> walk = Files.walk(Paths.get(importDir))) {
try (Stream<Path> walk = Files.walk(Paths.get(IMPORT_DIR))) {
return walk.filter(p -> !Files.isDirectory(p))
.filter(p -> p.getFileName().toString().equals(filename))
.map(Path::toFile)

View File

@@ -1,137 +0,0 @@
package org.raddatz.familienarchiv.security;
import jakarta.servlet.FilterChain;
import jakarta.servlet.ServletException;
import jakarta.servlet.http.Cookie;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletRequestWrapper;
import jakarta.servlet.http.HttpServletResponse;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpHeaders;
import org.springframework.stereotype.Component;
import org.springframework.web.filter.OncePerRequestFilter;
import java.io.IOException;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Enumeration;
/**
* Promotes the {@code auth_token} cookie to an {@code Authorization} header
* so that browser-side requests to {@code /api/*} authenticate the same way
* SSR fetches do.
*
* <p>The SvelteKit login action stores the full HTTP Basic header value
* ({@code "Basic <base64>"}) in an HttpOnly cookie. SSR fetches from
* {@code hooks.server.ts} read the cookie and pass it explicitly as the
* {@code Authorization} header. In the dev environment, Vite's proxy does
* the same on every {@code /api/*} request (see {@code vite.config.ts}).
* In production, Caddy proxies {@code /api/*} straight to the backend and
* does NOT translate the cookie — so client-side {@code fetch} and
* {@code EventSource} calls reach the backend without auth, get
* {@code 401 WWW-Authenticate: Basic}, and the browser pops a native dialog.
*
* <p>This filter closes that gap: if a request has an {@code auth_token}
* cookie but no explicit {@code Authorization} header, promote the cookie
* value (URL-decoded) into the header before Spring Security inspects it.
* Explicit {@code Authorization} headers are preserved unchanged.
*
* <p>See #520. Filter runs at {@code Ordered.HIGHEST_PRECEDENCE} so it
* mutates the request before any Spring Security filter sees it.
*
* <p><b>Scope:</b> only {@code /api/*} requests are touched. The
* {@code /actuator/*} block in Caddy plus the open auth/reset paths in
* {@link SecurityConfig} must NOT receive a promoted Authorization.
*
* <p><b>⚠ Log-leakage warning:</b> the wrapped request exposes the
* Authorization header via {@code getHeaderNames}/{@code getHeaders}. Any
* filter or interceptor that iterates request headers will see the live
* Basic credential. Do NOT add a request-header logger downstream of this
* filter without explicitly scrubbing the {@code Authorization} field.
*/
@Component
@Order(org.springframework.core.Ordered.HIGHEST_PRECEDENCE)
public class AuthTokenCookieFilter extends OncePerRequestFilter {
static final String COOKIE_NAME = "auth_token";
static final String SCOPE_PREFIX = "/api/";
@Override
protected void doFilterInternal(HttpServletRequest request,
HttpServletResponse response,
FilterChain chain) throws ServletException, IOException {
// Scope: only /api/* needs cookie promotion. /actuator/health (open),
// /api/auth/forgot-password (open), /login etc. don't.
if (!request.getRequestURI().startsWith(SCOPE_PREFIX)) {
chain.doFilter(request, response);
return;
}
// An explicit Authorization header wins — this is the SSR fetch path
// (hooks.server.ts builds the header itself).
if (request.getHeader(HttpHeaders.AUTHORIZATION) != null) {
chain.doFilter(request, response);
return;
}
Cookie[] cookies = request.getCookies();
if (cookies == null) {
chain.doFilter(request, response);
return;
}
for (Cookie c : cookies) {
if (COOKIE_NAME.equals(c.getName()) && c.getValue() != null && !c.getValue().isBlank()) {
String decoded;
try {
decoded = URLDecoder.decode(c.getValue(), StandardCharsets.UTF_8);
} catch (IllegalArgumentException malformed) {
// Malformed percent-encoding — refuse to forward a bogus
// Authorization header. Spring Security will treat the
// request as unauthenticated.
chain.doFilter(request, response);
return;
}
chain.doFilter(new AuthHeaderRequest(request, decoded), response);
return;
}
}
chain.doFilter(request, response);
}
/**
* Adds (or overrides) the {@code Authorization} header on a wrapped request.
* All other headers pass through unchanged.
*/
static final class AuthHeaderRequest extends HttpServletRequestWrapper {
private final String authorization;
AuthHeaderRequest(HttpServletRequest request, String authorization) {
super(request);
this.authorization = authorization;
}
@Override
public String getHeader(String name) {
if (HttpHeaders.AUTHORIZATION.equalsIgnoreCase(name)) {
return authorization;
}
return super.getHeader(name);
}
@Override
public Enumeration<String> getHeaders(String name) {
if (HttpHeaders.AUTHORIZATION.equalsIgnoreCase(name)) {
return Collections.enumeration(Collections.singletonList(authorization));
}
return super.getHeaders(name);
}
@Override
public Enumeration<String> getHeaderNames() {
Enumeration<String> base = super.getHeaderNames();
java.util.Set<String> names = new java.util.LinkedHashSet<>();
while (base.hasMoreElements()) names.add(base.nextElement());
names.add(HttpHeaders.AUTHORIZATION);
return Collections.enumeration(names);
}
}
}

View File

@@ -37,20 +37,12 @@ public class SecurityConfig {
@Bean
public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
http
// CSRF is intentionally disabled. With the cookie-promotion model
// (auth_token cookie → Authorization header via AuthTokenCookieFilter,
// see #520), every authenticated request to /api/* now carries the
// credential automatically once the cookie is set. The CSRF defence
// for state-changing endpoints is therefore LOAD-BEARING on:
//
// 1. SameSite=strict on the auth_token cookie (login/+page.server.ts).
// A cross-site POST from evil.com cannot include the cookie.
// 2. CORS — Spring's default rejects cross-origin requests with
// credentials unless explicitly allowed (no allowedOrigins config).
//
// If either of those is ever weakened (e.g. cookie flipped to
// SameSite=lax, CORS allowedOrigins expanded), CSRF protection
// MUST be re-enabled here.
// CSRF is intentionally disabled: every request from the SvelteKit frontend
// carries an explicit Authorization header (Basic Auth token injected by
// hooks.server.ts). Browsers block cross-origin requests from setting custom
// headers, so cross-site request forgery via a third-party page is not
// possible with this auth scheme. If the auth model ever changes to
// cookie-based sessions, CSRF protection must be re-enabled.
.csrf(csrf -> csrf.disable())
.authorizeHttpRequests(auth -> {

View File

@@ -88,8 +88,7 @@ public class AppUser {
};
public static String computeColor(UUID id) {
// Math.floorMod avoids the Integer.MIN_VALUE overflow trap in Math.abs(hashCode())
return PALETTE[Math.floorMod(id.hashCode(), PALETTE.length)];
return PALETTE[Math.abs(id.hashCode()) % PALETTE.length];
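// Worked example of the trap: Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE
// (still negative), so MIN_VALUE % PALETTE.length can be negative and index out
// of bounds; Math.floorMod(MIN_VALUE, n) always lands in [0, n).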
}
@PrePersist

View File

@@ -52,11 +52,7 @@ public class InviteService {
public InviteToken createInvite(CreateInviteRequest dto, AppUser creator) {
Set<UUID> groupIds = new HashSet<>();
if (dto.getGroupIds() != null && !dto.getGroupIds().isEmpty()) {
Set<UUID> uniqueIds = new HashSet<>(dto.getGroupIds());
List<UserGroup> groups = userService.findGroupsByIds(new ArrayList<>(uniqueIds));
if (groups.size() != uniqueIds.size()) {
throw DomainException.notFound(ErrorCode.GROUP_NOT_FOUND, "One or more group IDs do not exist");
}
List<UserGroup> groups = userService.findGroupsByIds(dto.getGroupIds());
groups.forEach(g -> groupIds.add(g.getId()));
}

View File

@@ -24,7 +24,4 @@ public interface InviteTokenRepository extends JpaRepository<InviteToken, UUID>
@Query("SELECT t FROM InviteToken t ORDER BY t.createdAt DESC")
List<InviteToken> findAllOrderedByCreatedAt();
@Query("SELECT CASE WHEN COUNT(t) > 0 THEN true ELSE false END FROM InviteToken t JOIN t.groupIds g WHERE g = :groupId AND t.revoked = false AND (t.expiresAt IS NULL OR t.expiresAt > CURRENT_TIMESTAMP) AND (t.maxUses IS NULL OR t.useCount < t.maxUses)")
boolean existsActiveWithGroupId(@Param("groupId") UUID groupId);
}

View File

@@ -20,7 +20,6 @@ import org.springframework.boot.CommandLineRunner;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.springframework.core.env.Environment;
import org.springframework.security.crypto.password.PasswordEncoder;
import java.time.LocalDate;
@@ -32,51 +31,26 @@ import java.util.Set;
@DependsOn("flyway")
public class UserDataInitializer {
static final String DEFAULT_ADMIN_EMAIL = "admin@familienarchiv.local";
static final String DEFAULT_ADMIN_PASSWORD = "admin123";
@Value("${app.admin.email:" + DEFAULT_ADMIN_EMAIL + "}")
@Value("${app.admin.email:admin@familyarchive.local}")
private String adminEmail;
@Value("${app.admin.password:" + DEFAULT_ADMIN_PASSWORD + "}")
@Value("${app.admin.password:admin123}")
private String adminPassword;
private final AppUserRepository userRepository;
private final UserGroupRepository groupRepository;
private final Environment environment;
@Bean
public CommandLineRunner initAdminUser(PasswordEncoder passwordEncoder) {
return args -> {
if (userRepository.findByEmail(adminEmail).isEmpty()) {
// Fail-closed in production: refuse to seed with the well-known
// defaults. Otherwise an operator who forgets APP_ADMIN_USERNAME
// / APP_ADMIN_PASSWORD locks production to admin@…/admin123 PERMANENTLY
// (UserDataInitializer only seeds when the row is missing — see #513).
// Allowed in dev/test/e2e because those run without secrets configured.
boolean isLocalProfile = environment.matchesProfiles("dev", "test", "e2e");
if (!isLocalProfile
&& (DEFAULT_ADMIN_EMAIL.equals(adminEmail)
|| DEFAULT_ADMIN_PASSWORD.equals(adminPassword))) {
throw new IllegalStateException(
"Refusing to seed admin user with default credentials outside "
+ "the dev/test/e2e profiles. Set APP_ADMIN_USERNAME and "
+ "APP_ADMIN_PASSWORD to non-default values before first boot — "
+ "this lock-in is permanent."
);
}
log.info("Kein Admin-User '{}' gefunden. Erstelle Default-Admin...", adminEmail);
// Reuse the Administrators group if it already exists (e.g. a
// previous boot seeded the group but failed before creating
// the admin user, or the operator deleted just the user row
// to retry the seed with a new email). Blind-INSERTing would
// violate user_groups_name_key and abort the context. See #518.
UserGroup adminGroup = groupRepository.findByName("Administrators")
.orElseGet(() -> groupRepository.save(UserGroup.builder()
.name("Administrators")
.permissions(Set.of("ADMIN", "READ_ALL", "WRITE_ALL", "ANNOTATE_ALL", "ADMIN_USER", "ADMIN_TAG", "ADMIN_PERMISSION"))
.build()));
UserGroup adminGroup = UserGroup.builder()
.name("Administrators")
.permissions(Set.of("ADMIN", "READ_ALL", "WRITE_ALL", "ANNOTATE_ALL", "ADMIN_USER", "ADMIN_TAG", "ADMIN_PERMISSION"))
.build();
groupRepository.save(adminGroup);
AppUser admin = AppUser.builder()
.email(adminEmail)

View File

@@ -37,9 +37,6 @@ public class UserService {
private final AppUserRepository userRepository;
private final UserGroupRepository groupRepository;
// Injected directly (not via InviteService) to avoid a constructor injection cycle:
// InviteService → UserService → InviteService. Spring Framework 7 forbids such cycles.
private final InviteTokenRepository inviteTokenRepository;
private final PasswordEncoder passwordEncoder;
private final AuditService auditService;
@@ -274,10 +271,9 @@ public class UserService {
@Transactional
public UserGroup createGroup(GroupDTO dto) {
UserGroup group = UserGroup.builder()
.name(dto.getName())
.permissions(dto.getPermissions() != null ? dto.getPermissions() : new HashSet<>())
.build();
UserGroup group = new UserGroup();
group.setName(dto.getName());
group.setPermissions(dto.getPermissions());
return groupRepository.save(group);
}
@@ -291,10 +287,6 @@ public class UserService {
@Transactional
public void deleteGroup(UUID id) {
if (inviteTokenRepository.existsActiveWithGroupId(id)) {
throw DomainException.conflict(ErrorCode.GROUP_HAS_ACTIVE_INVITES,
"Cannot delete group " + id + " — referenced by one or more active invites");
}
groupRepository.deleteById(id);
}
}

View File

@@ -38,12 +38,6 @@ spring:
starttls:
enable: true
server:
# Behind Caddy/reverse proxy: trust X-Forwarded-{Proto,For,Host} so that
# request.getScheme(), redirect URLs, and Spring Session "Secure" cookies
# reflect the original https client request, not the http hop from Caddy.
forward-headers-strategy: native
management:
health:
mail:
@@ -69,11 +63,7 @@ app:
from: ${APP_MAIL_FROM:noreply@familienarchiv.local}
admin:
# Key must be `email`, not `username` — UserDataInitializer reads
# `${app.admin.email:...}`. The env-var name stays APP_ADMIN_USERNAME
# to match the existing Gitea secrets and DEPLOYMENT.md §3.3.
# See #513.
email: ${APP_ADMIN_USERNAME:admin@familienarchiv.local}
username: ${APP_ADMIN_USERNAME:admin}
password: ${APP_ADMIN_PASSWORD:admin123}
import:
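The Java side of that key contract, sketched with the defaults from the yaml above (UserDataInitializer itself is not shown in this diff; AdminSeedPropertyKeyTest further down pins these exact placeholders):

@Value("${app.admin.email:admin@familienarchiv.local}")
private String adminEmail;

@Value("${app.admin.password:admin123}")
private String adminPassword;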

View File

@@ -1,8 +0,0 @@
-- Speeds up "documents by sender" queries used on /persons/[id] Korrespondenz-Überblick (#306),
-- /briefwechsel, and bulk-edit flows.
CREATE INDEX IF NOT EXISTS idx_documents_sender_id
ON documents(sender_id);
-- Speeds up "comments by author" queries on admin user detail and (future) contributor profile.
CREATE INDEX IF NOT EXISTS idx_comments_author_id
ON document_comments(author_id);
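A minimal sketch of the lookup shape the first index serves (hypothetical jdbc-style query; the real call sites are JPA repository methods not shown here):

// Per-person correspondence lookup: an index scan on idx_documents_sender_id
// instead of a sequential scan over the whole documents table.
List<UUID> sentDocIds = jdbc.queryForList(
        "SELECT id FROM documents WHERE sender_id = ?", UUID.class, personId);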

View File

@@ -1,7 +0,0 @@
-- Remove duplicate (group_id, permission) rows that accumulated without a UNIQUE constraint.
-- Keeps the row with the smallest ctid (earliest physical insertion order).
DELETE FROM group_permissions a
USING group_permissions b
WHERE a.ctid < b.ctid
AND a.group_id = b.group_id
AND a.permission = b.permission;
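A quick post-migration check that the dedup left at most one row per pair, in the style of MigrationIntegrationTest (hypothetical assertion, not part of this diff):

Integer dupes = jdbc.queryForObject(
        """
        SELECT COUNT(*) FROM (
            SELECT group_id, permission FROM group_permissions
            GROUP BY group_id, permission HAVING COUNT(*) > 1
        ) d
        """, Integer.class);
assertThat(dupes).isZero();  // V64's PRIMARY KEY would otherwise fail to build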

View File

@@ -1,11 +0,0 @@
-- Add NOT NULL and PRIMARY KEY to group_permissions.
-- Requires V63 to have run first (no duplicates can remain).
--
-- After this migration, future seed migrations can use:
-- INSERT INTO group_permissions ... ON CONFLICT DO NOTHING
-- instead of the INSERT ... WHERE NOT EXISTS pattern used before V64.
ALTER TABLE group_permissions
ALTER COLUMN permission SET NOT NULL;
ALTER TABLE group_permissions
ADD CONSTRAINT pk_group_permissions PRIMARY KEY (group_id, permission);

View File

@@ -1,8 +0,0 @@
-- Promote the de-facto unique constraint on transcription_block_mentioned_persons to a named PK.
-- uq_tbmp_block_person (added in V57) is backed by a B-tree index identical to a PK's;
-- swapping it for a real PRIMARY KEY (a drop + add, not an in-place rename) makes the
-- naming convention explicit (pk_* vs uq_*).
ALTER TABLE transcription_block_mentioned_persons
DROP CONSTRAINT uq_tbmp_block_person;
ALTER TABLE transcription_block_mentioned_persons
ADD CONSTRAINT pk_tbmp PRIMARY KEY (block_id, person_id);

View File

@@ -1,3 +0,0 @@
-- The composite PK (invite_token_id, group_id) does not support efficient lookups by group_id alone.
-- Add a dedicated index to support existsActiveWithGroupId queries.
CREATE INDEX idx_itg_group_id ON invite_token_group_ids (group_id);
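The predicate this index serves, as a hypothetical jdbc-style sketch. A B-tree only answers predicates on a leading prefix of its columns, and the composite PK leads with invite_token_id, so it cannot serve a group-only lookup:

Boolean referenced = jdbc.queryForObject(
        "SELECT EXISTS (SELECT 1 FROM invite_token_group_ids WHERE group_id = ?)",
        Boolean.class, groupId);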

View File

@@ -399,86 +399,6 @@ class MigrationIntegrationTest {
AND dc.annotation_id IS NOT NULL
""";
// ─── V62: indexes on FK columns ──────────────────────────────────────────
@Test
void v62_idx_documents_sender_id_exists() {
Integer count = jdbc.queryForObject(
"SELECT COUNT(*) FROM pg_catalog.pg_indexes WHERE tablename = 'documents' AND indexname = 'idx_documents_sender_id'",
Integer.class);
assertThat(count).isEqualTo(1);
}
@Test
void v62_idx_comments_author_id_exists() {
Integer count = jdbc.queryForObject(
"SELECT COUNT(*) FROM pg_catalog.pg_indexes WHERE tablename = 'document_comments' AND indexname = 'idx_comments_author_id'",
Integer.class);
assertThat(count).isEqualTo(1);
}
// ─── V63+V64: group_permissions dedup + primary key ──────────────────────
@Test
void v64_pk_group_permissions_exists() {
Integer count = jdbc.queryForObject(
"""
SELECT COUNT(*) FROM pg_catalog.pg_constraint c
JOIN pg_catalog.pg_class t ON c.conrelid = t.oid
WHERE t.relname = 'group_permissions'
AND c.conname = 'pk_group_permissions'
AND c.contype = 'p'
""",
Integer.class);
assertThat(count).isEqualTo(1);
}
@Test
void v64_permission_column_isNotNullable() {
Integer count = jdbc.queryForObject(
"""
SELECT COUNT(*) FROM information_schema.columns
WHERE table_schema = 'public'
AND table_name = 'group_permissions'
AND column_name = 'permission'
AND is_nullable = 'NO'
""",
Integer.class);
assertThat(count).isEqualTo(1);
}
@Test
@Transactional(propagation = Propagation.NOT_SUPPORTED)
void v64_rejectsDuplicateGroupPermission() {
UUID groupId = createUserGroup("DuplicateTestGroup-" + UUID.randomUUID());
try {
jdbc.update("INSERT INTO group_permissions (group_id, permission) VALUES (?, 'READ_ALL')", groupId);
assertThatThrownBy(() ->
jdbc.update("INSERT INTO group_permissions (group_id, permission) VALUES (?, 'READ_ALL')", groupId)
).isInstanceOf(DataIntegrityViolationException.class);
} finally {
jdbc.update("DELETE FROM group_permissions WHERE group_id = ?", groupId);
jdbc.update("DELETE FROM user_groups WHERE id = ?", groupId);
}
}
// ─── V65: tbmp UNIQUE promoted to PRIMARY KEY ─────────────────────────────
@Test
void v65_pk_tbmp_exists() {
Integer count = jdbc.queryForObject(
"""
SELECT COUNT(*) FROM pg_catalog.pg_constraint c
JOIN pg_catalog.pg_class t ON c.conrelid = t.oid
WHERE t.relname = 'transcription_block_mentioned_persons'
AND c.conname = 'pk_tbmp'
AND c.contype = 'p'
""",
Integer.class);
assertThat(count).isEqualTo(1);
}
// ─── helpers ─────────────────────────────────────────────────────────────
private UUID createPerson(String firstName, String lastName) {
@@ -562,10 +482,4 @@ class MigrationIntegrationTest {
""", id, recipientId, docId, commentId);
return id;
}
private UUID createUserGroup(String name) {
UUID id = UUID.randomUUID();
jdbc.update("INSERT INTO user_groups (id, name) VALUES (?, ?)", id, name);
return id;
}
}

View File

@@ -1,48 +0,0 @@
package org.raddatz.familienarchiv.config;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.config.YamlPropertiesFactoryBean;
import org.springframework.boot.web.server.autoconfigure.ServerProperties.ForwardHeadersStrategy;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
import org.springframework.core.env.PropertiesPropertySource;
import org.springframework.core.io.ClassPathResource;
import java.util.Properties;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Binds {@code server.forward-headers-strategy} from {@code application.yaml} into
* Spring Boot's typed {@link ForwardHeadersStrategy} enum. The binder rejects any
* value that is not a valid enum constant ({@code BindException}), so a typo
* ({@code "nativ"}) or a future Spring rename of the property fails the test
* instead of silently degrading to {@code NONE}. (Relaxed binding is lenient
* about case and whitespace, so {@code "Native"} or {@code "framework "} would
* still bind.)
*
* <p>No Spring context, no embedded server, no Testcontainers — this is the
* cheapest test that pins the contract "Caddy's X-Forwarded-Proto is trusted".
*/
class ForwardHeadersConfigurationTest {
@Test
void forward_headers_strategy_binds_to_NATIVE() {
YamlPropertiesFactoryBean yaml = new YamlPropertiesFactoryBean();
yaml.setResources(new ClassPathResource("application.yaml"));
Properties props = yaml.getObject();
assertThat(props).as("application.yaml must be on the classpath").isNotNull();
Binder binder = new Binder(ConfigurationPropertySources.from(
new PropertiesPropertySource("application", props)));
ForwardHeadersStrategy strategy = binder
.bind("server.forward-headers-strategy", ForwardHeadersStrategy.class)
.orElseThrow(() -> new AssertionError(
"server.forward-headers-strategy is missing from application.yaml"));
assertThat(strategy)
.as("Spring must trust X-Forwarded-Proto from Caddy so that "
+ "request.getScheme(), redirect URLs, and the Spring Session "
+ "'Secure' cookie reflect the original https client request.")
.isEqualTo(ForwardHeadersStrategy.NATIVE);
}
}

View File

@@ -1,109 +0,0 @@
package org.raddatz.familienarchiv.document;
import jakarta.persistence.EntityManager;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.raddatz.familienarchiv.PostgresContainerConfig;
import org.raddatz.familienarchiv.config.FlywayConfig;
import org.raddatz.familienarchiv.document.DocumentRepository;
import org.raddatz.familienarchiv.document.Document;
import org.raddatz.familienarchiv.document.DocumentStatus;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.data.jpa.test.autoconfigure.DataJpaTest;
import org.springframework.boot.jdbc.test.autoconfigure.AutoConfigureTestDatabase;
import org.springframework.context.annotation.Import;
import org.springframework.test.annotation.DirtiesContext;
import java.util.List;
import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatNoException;
/**
* Repository-level integration tests for {@code findFtsPageRaw}: verifies that the
* paginated FTS query returns exactly page-size rows and that the window-function
* total reflects the full match count, not just the page count.
*
* <p>Uses real Postgres via Testcontainers so the GIN index, tsvector trigger, and
* {@code websearch_to_tsquery} semantics are identical to production.
*
* <p>{@code AFTER_CLASS} dirty-context keeps the Spring context alive for all tests
* in this class and rebuilds it once at the end, rather than after every test.
*/
@DataJpaTest
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
@Import({PostgresContainerConfig.class, FlywayConfig.class})
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_CLASS)
class DocumentFtsPagedIntegrationTest {
@Autowired DocumentRepository documentRepository;
@Autowired EntityManager em;
// 60 docs match "Walter"; 10 docs with "Hans" do not.
private static final int WALTER_COUNT = 60;
private static final int PAGE_SIZE = 50;
@BeforeEach
void seed() {
documentRepository.deleteAll();
em.flush();
for (int i = 0; i < WALTER_COUNT; i++) {
documentRepository.saveAndFlush(doc("Brief von Walter Nr. " + i));
}
for (int i = 0; i < 10; i++) {
documentRepository.saveAndFlush(doc("Brief von Hans Nr. " + i));
}
em.clear();
}
@Test
void findFtsPageRaw_firstPage_returnsPageSizeRows() {
List<Object[]> rows = documentRepository.findFtsPageRaw("Walter", 0, PAGE_SIZE);
assertThat(rows).hasSize(PAGE_SIZE);
}
@Test
void findFtsPageRaw_windowTotal_equalsFullMatchCount_notPageSize() {
List<Object[]> rows = documentRepository.findFtsPageRaw("Walter", 0, PAGE_SIZE);
long total = ((Number) rows.get(0)[2]).longValue();
assertThat(total).isEqualTo(WALTER_COUNT);
}
@Test
void findFtsPageRaw_lastPage_returnsRemainder() {
int remainder = WALTER_COUNT % PAGE_SIZE; // 60 % 50 = 10
List<Object[]> rows = documentRepository.findFtsPageRaw("Walter", PAGE_SIZE, PAGE_SIZE);
assertThat(rows).hasSize(remainder);
long total = ((Number) rows.get(0)[2]).longValue();
assertThat(total).isEqualTo(WALTER_COUNT);
}
@Test
void findFtsPageRaw_noMatches_returnsEmptyList() {
List<Object[]> rows = documentRepository.findFtsPageRaw("XYZ_KEIN_TREFFER", 0, PAGE_SIZE);
assertThat(rows).isEmpty();
}
@Test
void findFtsPageRaw_stopwordOnlyQuery_returnsEmptyList_noException() {
assertThatNoException().isThrownBy(() -> {
List<Object[]> rows = documentRepository.findFtsPageRaw("der die das und", 0, PAGE_SIZE);
assertThat(rows).isEmpty();
});
}
// ─── Helper ───────────────────────────────────────────────────────────────
private Document doc(String title) {
return Document.builder()
.title(title)
.originalFilename(title.replace(" ", "_") + ".pdf")
.status(DocumentStatus.UPLOADED)
.build();
}
}
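findFtsPageRaw itself is not part of this diff; the window-function total those assertions pin down is the usual COUNT(*) OVER () pattern. A hypothetical reconstruction (column names and the 'german' FTS config are assumptions):

@Query(value = """
        SELECT d.id,
               ts_rank(d.search_vector, websearch_to_tsquery('german', :q)) AS rank,
               COUNT(*) OVER () AS total  -- full match count, repeated on every row
        FROM documents d
        WHERE d.search_vector @@ websearch_to_tsquery('german', :q)
        ORDER BY rank DESC
        OFFSET :offset LIMIT :limit
        """, nativeQuery = true)
List<Object[]> findFtsPageRaw(@Param("q") String query, @Param("offset") int offset, @Param("limit") int limit);

Every returned row carries the same total in column index 2, which is why the tests read rows.get(0)[2].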

View File

@@ -69,7 +69,7 @@ class DocumentFtsTest {
documentRepository.saveAndFlush(document("Alter Brief"));
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Brief");
List<UUID> ids = documentRepository.findRankedIdsByFts("Brief");
assertThat(ids).hasSize(1);
}
@@ -79,7 +79,7 @@ class DocumentFtsTest {
documentRepository.saveAndFlush(document("Alter Brief"));
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Briefe");
List<UUID> ids = documentRepository.findRankedIdsByFts("Briefe");
assertThat(ids).hasSize(1);
}
@@ -89,7 +89,7 @@ class DocumentFtsTest {
documentRepository.saveAndFlush(document("Ein furchtbarer Brief"));
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("furchtb");
List<UUID> ids = documentRepository.findRankedIdsByFts("furchtb");
assertThat(ids).hasSize(1);
}
@@ -99,7 +99,7 @@ class DocumentFtsTest {
documentRepository.saveAndFlush(document("Familienfoto"));
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Brief");
List<UUID> ids = documentRepository.findRankedIdsByFts("Brief");
assertThat(ids).isEmpty();
}
@@ -115,7 +115,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("schreiben");
List<UUID> ids = documentRepository.findRankedIdsByFts("schreiben");
assertThat(ids).contains(doc.getId());
}
@@ -125,14 +125,14 @@ class DocumentFtsTest {
Document doc = documentRepository.saveAndFlush(document("Leeres Dokument"));
em.clear();
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).isEmpty();
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).isEmpty();
UUID annotationId = annotation(doc.getId());
blockRepository.saveAndFlush(block(doc.getId(), annotationId, "Grundbuch Eintrag 1923", 0));
em.flush();
em.clear();
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).contains(doc.getId());
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).contains(doc.getId());
}
@Test
@@ -144,13 +144,13 @@ class DocumentFtsTest {
em.flush();
em.clear();
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).contains(doc.getId());
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).contains(doc.getId());
blockRepository.deleteById(block.getId());
em.flush();
em.clear();
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).doesNotContain(doc.getId());
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).doesNotContain(doc.getId());
}
// ─── Ranking ───────────────────────────────────────────────────────────────
@@ -166,7 +166,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Grundbuch");
List<UUID> ids = documentRepository.findRankedIdsByFts("Grundbuch");
assertThat(ids).hasSize(2);
assertThat(ids.get(0)).isEqualTo(docA.getId());
@@ -179,7 +179,7 @@ class DocumentFtsTest {
documentRepository.saveAndFlush(document("Ein Brief von der Oma"));
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("der die das und");
List<UUID> ids = documentRepository.findRankedIdsByFts("der die das und");
assertThat(ids).isEmpty();
}
@@ -195,7 +195,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Wille");
List<UUID> ids = documentRepository.findRankedIdsByFts("Wille");
assertThat(ids).contains(doc.getId());
}
@@ -205,7 +205,7 @@ class DocumentFtsTest {
documentRepository.saveAndFlush(document("Brief"));
em.clear();
assertThatNoException().isThrownBy(() -> documentRepository.findAllMatchingIdsByFts("((("));
assertThatNoException().isThrownBy(() -> documentRepository.findRankedIdsByFts("((("));
}
// ─── Weight C: sender/receiver names ───────────────────────────────────────
@@ -223,7 +223,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Schmidt");
List<UUID> ids = documentRepository.findRankedIdsByFts("Schmidt");
assertThat(ids).contains(doc.getId());
}
@@ -241,7 +241,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Raddatz");
List<UUID> ids = documentRepository.findRankedIdsByFts("Raddatz");
assertThat(ids).contains(doc.getId());
}
@@ -260,7 +260,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Familiengeschichte");
List<UUID> ids = documentRepository.findRankedIdsByFts("Familiengeschichte");
assertThat(ids).hasSize(1);
}
@@ -278,7 +278,7 @@ class DocumentFtsTest {
em.flush();
em.clear();
List<UUID> rankedIds = documentRepository.findAllMatchingIdsByFts("Grundbuch");
List<UUID> rankedIds = documentRepository.findRankedIdsByFts("Grundbuch");
Specification<Document> spec = Specification.where(hasIds(rankedIds))
.and(hasStatus(DocumentStatus.UPLOADED));

View File

@@ -21,22 +21,17 @@ import org.springframework.data.domain.Pageable;
import org.springframework.data.jpa.domain.Specification;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
class DocumentServiceSortTest {
private static final Pageable PAGE = org.springframework.data.domain.PageRequest.of(0, 10_000);
private static final Pageable UNPAGED = org.springframework.data.domain.PageRequest.of(0, 10_000);
@Mock DocumentRepository documentRepository;
@Mock PersonService personService;
@@ -48,12 +43,12 @@ class DocumentServiceSortTest {
@Mock TranscriptionBlockQueryService transcriptionBlockQueryService;
@InjectMocks DocumentService documentService;
// ─── DATE sort ────────────────────────────────────────────────────────────
// ─── searchDocuments — DATE sort ──────────────────────────────────────────
@Test
void searchDocuments_with_DATE_sort_and_text_sorts_chronologically_not_by_relevance() {
UUID id1 = UUID.randomUUID(); // higher relevance, older doc
UUID id2 = UUID.randomUUID(); // lower relevance, newer doc
UUID id1 = UUID.randomUUID(); // rank position 0 (higher relevance, older doc)
UUID id2 = UUID.randomUUID(); // rank position 1 (lower relevance, newer doc)
Document older = Document.builder().id(id1)
.title("Brief").status(DocumentStatus.UPLOADED)
@@ -62,48 +57,38 @@ class DocumentServiceSortTest {
.title("Brief").status(DocumentStatus.UPLOADED)
.documentDate(LocalDate.of(1960, 1, 1)).build();
when(documentRepository.findAllMatchingIdsByFts("Brief")).thenReturn(List.of(id1, id2));
// FTS returns id1 first (higher rank), id2 second
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(id1, id2));
// findAll(spec, pageable) — the correct date path — returns date-DESC order
when(documentRepository.findAll(any(Specification.class), any(Pageable.class)))
.thenReturn(new PageImpl<>(List.of(newer, older)));
DocumentSearchResult result = documentService.searchDocuments(
"Brief", null, null, null, null, null, null, null, DocumentSort.DATE, "DESC", null, PAGE);
"Brief", null, null, null, null, null, null, null, DocumentSort.DATE, "DESC", null, UNPAGED);
// Expect: date order (newer 1960 first), NOT rank order (older 1940 first)
assertThat(result.items()).hasSize(2);
assertThat(result.items().get(0).document().getId()).isEqualTo(id2); // newer first
assertThat(result.items().get(0).document().getId()).isEqualTo(id2); // newer doc first
}
// ─── RELEVANCE sort — pure text (no filters) ──────────────────────────────
@Test
void searchDocuments_relevance_pureText_calls_findFtsPageRaw_not_findAllMatchingIds() {
UUID id1 = UUID.randomUUID();
List<Object[]> ftsRows = ftsRows(id1, 0.5d, 1L);
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
when(documentRepository.findAllById(any()))
.thenReturn(List.of(doc(id1)));
documentService.searchDocuments(
"Brief", null, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, PAGE);
verify(documentRepository).findFtsPageRaw(anyString(), anyInt(), anyInt());
verify(documentRepository, never()).findAllMatchingIdsByFts(anyString());
}
// ─── searchDocuments — RELEVANCE sort ─────────────────────────────────────
@Test
void searchDocuments_with_RELEVANCE_sort_and_text_preserves_fts_rank_order() {
UUID id1 = UUID.randomUUID(); // higher rank — must appear first
UUID id2 = UUID.randomUUID(); // lower rank
UUID id1 = UUID.randomUUID(); // rank position 0
UUID id2 = UUID.randomUUID(); // rank position 1
List<Object[]> ftsRows = new ArrayList<>();
ftsRows.add(new Object[]{id1, 0.8d, 2L});
ftsRows.add(new Object[]{id2, 0.3d, 2L});
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
when(documentRepository.findAllById(any())).thenReturn(List.of(doc(id2), doc(id1))); // unordered from JPA
Document doc1 = Document.builder().id(id1).title("Brief").status(DocumentStatus.UPLOADED).build();
Document doc2 = Document.builder().id(id2).title("Brief").status(DocumentStatus.UPLOADED).build();
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(id1, id2));
when(documentRepository.findAll(any(Specification.class)))
.thenReturn(List.of(doc2, doc1)); // unordered from DB
DocumentSearchResult result = documentService.searchDocuments(
"Brief", null, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, PAGE);
"Brief", null, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, UNPAGED);
// Expect: rank order restored (id1 first)
assertThat(result.items().get(0).document().getId()).isEqualTo(id1);
}
@@ -112,82 +97,16 @@ class DocumentServiceSortTest {
UUID id1 = UUID.randomUUID();
UUID id2 = UUID.randomUUID();
List<Object[]> ftsRows = new ArrayList<>();
ftsRows.add(new Object[]{id1, 0.8d, 2L});
ftsRows.add(new Object[]{id2, 0.3d, 2L});
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
when(documentRepository.findAllById(any())).thenReturn(List.of(doc(id2), doc(id1)));
Document doc1 = Document.builder().id(id1).title("Brief").status(DocumentStatus.UPLOADED).build();
Document doc2 = Document.builder().id(id2).title("Brief").status(DocumentStatus.UPLOADED).build();
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(id1, id2));
when(documentRepository.findAll(any(Specification.class)))
.thenReturn(List.of(doc2, doc1));
DocumentSearchResult result = documentService.searchDocuments(
"Brief", null, null, null, null, null, null, null, null, null, null, PAGE);
"Brief", null, null, null, null, null, null, null, null, null, null, UNPAGED);
assertThat(result.items().get(0).document().getId()).isEqualTo(id1);
}
// ─── RELEVANCE sort — overflow guard ─────────────────────────────────────
@Test
void searchDocuments_relevance_returns_empty_when_offset_exceeds_maxInt() {
// offset = pageNumber * pageSize; choose values so offset > Integer.MAX_VALUE
Pageable hugePage = org.springframework.data.domain.PageRequest.of(Integer.MAX_VALUE / 10 + 1, 10);
DocumentSearchResult result = documentService.searchDocuments(
"Brief", null, null, null, null, null, null, null,
DocumentSort.RELEVANCE, null, null, hugePage);
assertThat(result.items()).isEmpty();
verify(documentRepository, never()).findFtsPageRaw(anyString(), anyInt(), anyInt());
}
// ─── toFtsPage — UUID-as-String JDBC driver variance ────────────────────
@Test
void searchDocuments_relevance_handles_string_uuid_from_jdbc_driver() {
String stringId = "11111111-1111-1111-1111-111111111111";
UUID uuidId = UUID.fromString(stringId);
// Simulate a JDBC driver that returns the id column as String instead of UUID
List<Object[]> ftsRows = new ArrayList<>();
ftsRows.add(new Object[]{stringId, 0.5d, 1L});
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
when(documentRepository.findAllById(any())).thenReturn(List.of(doc(uuidId)));
DocumentSearchResult result = documentService.searchDocuments(
"Brief", null, null, null, null, null, null, null,
DocumentSort.RELEVANCE, null, null, PAGE);
assertThat(result.items()).hasSize(1);
assertThat(result.items().get(0).document().getId()).isEqualTo(uuidId);
}
// ─── RELEVANCE sort — text + active filter ────────────────────────────────
@Test
void searchDocuments_relevance_with_active_filter_uses_inMemory_path() {
UUID id1 = UUID.randomUUID();
UUID id2 = UUID.randomUUID();
when(documentRepository.findAllMatchingIdsByFts("Brief")).thenReturn(List.of(id1, id2));
when(documentRepository.findAll(any(Specification.class)))
.thenReturn(List.of(doc(id2), doc(id1)));
// the dateFrom filter is active → triggers the in-memory path, not findFtsPageRaw
LocalDate from = LocalDate.of(1900, 1, 1);
documentService.searchDocuments(
"Brief", from, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, PAGE);
verify(documentRepository, never()).findFtsPageRaw(anyString(), anyInt(), anyInt());
verify(documentRepository).findAllMatchingIdsByFts("Brief");
}
// ─── Helpers ──────────────────────────────────────────────────────────────
private static Document doc(UUID id) {
return Document.builder().id(id).title("Brief").status(DocumentStatus.UPLOADED).build();
}
private static List<Object[]> ftsRows(UUID id, double rank, long total) {
List<Object[]> rows = new ArrayList<>();
rows.add(new Object[]{id, rank, total});
return rows;
}
}
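Restoring rank order after an unordered findAll(spec) is a small index-map sort; a minimal sketch of the pattern the RELEVANCE tests exercise (hypothetical; rankedIds and docs stand in for the service's locals):

// Position of each id in the FTS ranking...
Map<UUID, Integer> rank = new HashMap<>();
for (int i = 0; i < rankedIds.size(); i++) rank.put(rankedIds.get(i), i);
// ...then sort the unordered entities back into that order.
docs.sort(Comparator.comparingInt((Document d) -> rank.get(d.getId())));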

View File

@@ -1620,10 +1620,9 @@ class DocumentServiceTest {
// chr(1)=\u0001 marks start, chr(2)=\u0002 marks end of highlighted term
List<Object[]> rows = Collections.singletonList(new Object[]{docId, "\u0001Brief\u0002 an Anna", null, false, null, null, null});
List<Object[]> ftsRows = new java.util.ArrayList<>();
ftsRows.add(new Object[]{docId, 0.5d, 1L});
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
when(documentRepository.findAllById(any())).thenReturn(List.of(doc));
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(docId));
when(documentRepository.findAll(any(org.springframework.data.jpa.domain.Specification.class)))
.thenReturn(List.of(doc));
when(documentRepository.findEnrichmentData(any(), eq("Brief"))).thenReturn(rows);
DocumentSearchResult result = documentService.searchDocuments(
@@ -1655,10 +1654,9 @@ class DocumentServiceTest {
String snippetHeadline = "Hier ist der \u0001Brief\u0002 aus Berlin";
List<Object[]> rows = Collections.singletonList(new Object[]{docId, "Dok", snippetHeadline, false, null, null, null});
List<Object[]> snippetFtsRows = new java.util.ArrayList<>();
snippetFtsRows.add(new Object[]{docId, 0.5d, 1L});
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(snippetFtsRows);
when(documentRepository.findAllById(any())).thenReturn(List.of(doc));
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(docId));
when(documentRepository.findAll(any(org.springframework.data.jpa.domain.Specification.class)))
.thenReturn(List.of(doc));
when(documentRepository.findEnrichmentData(any(), eq("Brief"))).thenReturn(rows);
DocumentSearchResult result = documentService.searchDocuments(
@@ -2204,7 +2202,7 @@ class DocumentServiceTest {
@Test
void findIdsForFilter_returnsEmpty_whenFtsHasNoMatches() {
when(documentRepository.findAllMatchingIdsByFts("xyz")).thenReturn(List.of());
when(documentRepository.findRankedIdsByFts("xyz")).thenReturn(List.of());
List<UUID> result = documentService.findIdsForFilter(
"xyz", null, null, null, null, null, null, null, null);
@@ -2388,7 +2386,7 @@ class DocumentServiceTest {
@Test
void getDensity_shortCircuits_whenFtsReturnsNoMatches() {
when(documentRepository.findAllMatchingIdsByFts("xyz")).thenReturn(List.of());
when(documentRepository.findRankedIdsByFts("xyz")).thenReturn(List.of());
DocumentDensityResult result = documentService.getDensity(
new DensityFilters("xyz", null, null, null, null, null, null));

View File

@@ -10,7 +10,6 @@ import org.raddatz.familienarchiv.document.DocumentStatus;
import org.raddatz.familienarchiv.document.DocumentRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.context.annotation.Import;
import org.springframework.test.context.DynamicPropertyRegistry;
import org.springframework.test.context.DynamicPropertySource;
@@ -42,7 +41,6 @@ import static org.assertj.core.api.Assertions.assertThat;
* test pyramid mocks at the FileService boundary.
*/
@SpringBootTest
@ActiveProfiles("test")
@Import(PostgresContainerConfig.class)
class ThumbnailServiceIntegrationTest {

View File

@@ -44,14 +44,6 @@ class CommentControllerTest {
// ─── Block comment endpoints ─────────────────────────────────────────────
@Test
@WithMockUser
void getBlockComments_returns400_when_documentId_is_not_a_UUID() throws Exception {
UUID blockId = UUID.randomUUID();
mockMvc.perform(get("/api/documents/NOT-A-UUID/transcription-blocks/" + blockId + "/comments"))
.andExpect(status().isBadRequest());
}
@Test
@WithMockUser
void getBlockComments_returns200() throws Exception {
@@ -123,15 +115,6 @@ class CommentControllerTest {
// ─── Block reply endpoints ───────────────────────────────────────────────
@Test
@WithMockUser(authorities = "ANNOTATE_ALL")
void replyToBlockComment_returns400_when_blockId_is_not_a_UUID() throws Exception {
mockMvc.perform(post("/api/documents/" + DOC_ID + "/transcription-blocks/NOT-A-UUID"
+ "/comments/" + COMMENT_ID + "/replies")
.contentType(MediaType.APPLICATION_JSON).content(COMMENT_JSON))
.andExpect(status().isBadRequest());
}
@Test
void replyToBlockComment_returns401_whenUnauthenticated() throws Exception {
UUID blockId = UUID.randomUUID();

View File

@@ -20,10 +20,7 @@ import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import java.io.File;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDate;
@@ -53,7 +50,6 @@ class MassImportServiceTest {
void setUp() {
service = new MassImportService(documentService, personService, tagService, s3Client, thumbnailAsyncRunner);
ReflectionTestUtils.setField(service, "bucketName", "test-bucket");
ReflectionTestUtils.setField(service, "importDir", "/import");
ReflectionTestUtils.setField(service, "colIndex", 0);
ReflectionTestUtils.setField(service, "colBox", 1);
ReflectionTestUtils.setField(service, "colFolder", 2);
@@ -73,64 +69,20 @@ class MassImportServiceTest {
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.IDLE);
}
@Test
void getStatus_hasStatusCode_IMPORT_IDLE_byDefault() {
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_IDLE");
}
// ─── runImportAsync ───────────────────────────────────────────────────────
@Test
void runImportAsync_setsFailedStatus_whenImportDirectoryDoesNotExist() {
// /import directory doesn't exist in test environment → IOException → IMPORT_FAILED_INTERNAL
// /import directory doesn't exist in test environment → findSpreadsheetFile throws
service.runImportAsync();
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.FAILED);
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_FAILED_INTERNAL");
}
@Test
void runImportAsync_readsFromConfiguredImportDir(@TempDir Path tempDir) {
// Empty temp dir → findSpreadsheetFile throws "no spreadsheet" with the
// configured path in the message. Proves the field, not a constant,
// drives the lookup.
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
service.runImportAsync();
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.FAILED);
assertThat(service.getStatus().message()).contains(tempDir.toString());
}
@Test
void runImportAsync_setsStatusCode_IMPORT_FAILED_NO_SPREADSHEET_whenDirIsEmpty(@TempDir Path tempDir) {
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
service.runImportAsync();
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_FAILED_NO_SPREADSHEET");
}
@Test
void runImportAsync_setsStatusCode_IMPORT_DONE_whenSpreadsheetHasNoDataRows(@TempDir Path tempDir) throws Exception {
Path xlsx = tempDir.resolve("import.xlsx");
try (XSSFWorkbook wb = new XSSFWorkbook()) {
wb.createSheet("Sheet1");
try (OutputStream out = Files.newOutputStream(xlsx)) {
wb.write(out);
}
}
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
service.runImportAsync();
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_DONE");
}
@Test
void runImportAsync_throwsConflict_whenAlreadyRunning() {
MassImportService.ImportStatus running = new MassImportService.ImportStatus(
MassImportService.State.RUNNING, "IMPORT_RUNNING", "Running...", 0, LocalDateTime.now());
MassImportService.State.RUNNING, "Running...", 0, LocalDateTime.now());
ReflectionTestUtils.setField(service, "currentStatus", running);
assertThatThrownBy(() -> service.runImportAsync())

View File

@@ -1,134 +0,0 @@
package org.raddatz.familienarchiv.security;
import jakarta.servlet.FilterChain;
import jakarta.servlet.http.Cookie;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import org.springframework.mock.web.MockHttpServletRequest;
import org.springframework.mock.web.MockHttpServletResponse;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
/**
* The filter must turn a browser-side {@code Cookie: auth_token=Basic%20<base64>}
* into {@code Authorization: Basic <base64>} (URL-decoded) so that Spring's
* Basic-auth filter accepts it. Skips when the request already has an explicit
* {@code Authorization} header, or when no {@code auth_token} cookie is present.
*
* <p>See #520.
*/
class AuthTokenCookieFilterTest {
private final AuthTokenCookieFilter filter = new AuthTokenCookieFilter();
@Test
void promotes_url_encoded_auth_token_cookie_to_decoded_Authorization_header() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
req.setRequestURI("/api/users/me");
req.setCookies(new Cookie("auth_token", "Basic%20YWRtaW5AZmFtaWx5YXJjaGl2ZS5sb2NhbDpzZWNyZXQ%3D"));
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
ArgumentCaptor<HttpServletRequest> captor = ArgumentCaptor.forClass(HttpServletRequest.class);
verify(chain, times(1)).doFilter(captor.capture(), org.mockito.ArgumentMatchers.any(HttpServletResponse.class));
HttpServletRequest forwarded = captor.getValue();
assertThat(forwarded.getHeader("Authorization"))
.as("Authorization must be URL-decoded so Spring's Basic parser sees a literal space")
.isEqualTo("Basic YWRtaW5AZmFtaWx5YXJjaGl2ZS5sb2NhbDpzZWNyZXQ=");
}
@Test
void preserves_explicit_Authorization_header_and_ignores_cookie() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
req.setRequestURI("/api/users/me");
req.addHeader("Authorization", "Basic explicit-header-wins");
req.setCookies(new Cookie("auth_token", "Basic%20cookie-would-have-promoted"));
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
// Forwards the original request unchanged — same instance, no wrapping.
verify(chain).doFilter(req, res);
}
@Test
void passes_through_when_no_cookies_at_all() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
req.setRequestURI("/api/users/me");
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
verify(chain).doFilter(req, res);
}
@Test
void passes_through_when_auth_token_cookie_is_absent() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
req.setRequestURI("/api/users/me");
req.setCookies(new Cookie("some_other_cookie", "value"));
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
verify(chain).doFilter(req, res);
}
@Test
void passes_through_when_auth_token_cookie_is_empty() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
req.setRequestURI("/api/users/me");
req.setCookies(new Cookie("auth_token", ""));
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
verify(chain).doFilter(req, res);
}
@Test
void passes_through_unchanged_when_request_is_outside_api_scope() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
// /actuator/health and similar must NOT receive a promoted Authorization
// header — they have their own access rules and should never be authed
// via the cookie.
req.setRequestURI("/actuator/health");
req.setCookies(new Cookie("auth_token", "Basic%20YWR=="));
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
// Forwards the original request unchanged — same instance, no wrapping.
verify(chain).doFilter(req, res);
}
@Test
void passes_through_unchanged_when_cookie_value_is_malformed_percent_encoding() throws Exception {
MockHttpServletRequest req = new MockHttpServletRequest();
req.setRequestURI("/api/users/me");
// Lone "%" without two hex digits → URLDecoder throws → filter must
// refuse to forward a bogus Authorization header.
req.setCookies(new Cookie("auth_token", "Basic%2"));
MockHttpServletResponse res = new MockHttpServletResponse();
FilterChain chain = mock(FilterChain.class);
filter.doFilter(req, res, chain);
// Forwards the original request unchanged — Spring Security treats it
// as unauthenticated rather than crashing on bad input.
verify(chain).doFilter(req, res);
}
}
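AuthTokenCookieFilter itself is not in this diff; a hypothetical sketch of its core, inferred only from the behaviors the tests above pin down (imports and the surrounding doFilterInternal omitted):

if (request.getHeader("Authorization") == null && request.getRequestURI().startsWith("/api/")) {
    Cookie cookie = WebUtils.getCookie(request, "auth_token");
    if (cookie != null && !cookie.getValue().isEmpty()) {
        try {
            String decoded = URLDecoder.decode(cookie.getValue(), StandardCharsets.UTF_8);
            chain.doFilter(new HttpServletRequestWrapper(request) {
                @Override public String getHeader(String name) {
                    return "Authorization".equalsIgnoreCase(name) ? decoded : super.getHeader(name);
                }
            }, response);
            return;
        } catch (IllegalArgumentException malformed) {
            // lone "%" etc.: forward unchanged, unauthenticated beats a bogus header
        }
    }
}
chain.doFilter(request, response);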

View File

@@ -40,47 +40,6 @@ class AdminControllerTest {
@MockitoBean ThumbnailBackfillService thumbnailBackfillService;
@MockitoBean CustomUserDetailsService customUserDetailsService;
// ─── GET /api/admin/import-status ─────────────────────────────────────────
@Test
@WithMockUser(authorities = "ADMIN")
void importStatus_returns200_withStatusCode_whenAdmin() throws Exception {
MassImportService.ImportStatus status = new MassImportService.ImportStatus(
MassImportService.State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
when(massImportService.getStatus()).thenReturn(status);
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.state").value("IDLE"))
.andExpect(jsonPath("$.statusCode").value("IMPORT_IDLE"))
.andExpect(jsonPath("$.processed").value(0));
}
@Test
@WithMockUser(authorities = "ADMIN")
void importStatus_messageField_notPresentInApiResponse() throws Exception {
MassImportService.ImportStatus status = new MassImportService.ImportStatus(
MassImportService.State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
when(massImportService.getStatus()).thenReturn(status);
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message").doesNotExist());
}
@Test
void importStatus_returns401_whenUnauthenticated() throws Exception {
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isUnauthorized());
}
@Test
@WithMockUser(authorities = "READ_ALL")
void importStatus_returns403_whenUserLacksAdminPermission() throws Exception {
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isForbidden());
}
@Test
void backfillVersions_returns401_whenUnauthenticated() throws Exception {
mockMvc.perform(post("/api/admin/backfill-versions"))

View File

@@ -1,174 +0,0 @@
package org.raddatz.familienarchiv.user;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.boot.CommandLineRunner;
import org.springframework.core.env.Environment;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.test.util.ReflectionTestUtils;
import java.util.Optional;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* UserDataInitializer must refuse to seed the admin user with the hardcoded
* dev defaults when running outside the {@code dev} profile.
*
* <p>Why this matters: per DEPLOYMENT.md §3.5 and ADR-011, the admin password
* is permanently locked on first deploy (UserDataInitializer only seeds when
* the row is missing). If an operator forgets to set {@code APP_ADMIN_USERNAME}
* / {@code APP_ADMIN_PASSWORD}, prod silently boots with the well-known dev
* defaults — a credential-disclosure foot-gun, not a config typo. See #513.
*/
@ExtendWith(MockitoExtension.class)
class AdminSeedFailClosedTest {
@Mock AppUserRepository userRepository;
@Mock UserGroupRepository groupRepository;
@Mock Environment environment;
@Mock PasswordEncoder passwordEncoder;
UserDataInitializer initializer;
@BeforeEach
void setUp() {
initializer = new UserDataInitializer(userRepository, groupRepository, environment);
}
@Test
void refuses_to_seed_when_email_is_default_and_profile_is_not_dev() throws Exception {
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
ReflectionTestUtils.setField(initializer, "adminEmail", UserDataInitializer.DEFAULT_ADMIN_EMAIL);
ReflectionTestUtils.setField(initializer, "adminPassword", "operator-set-this-one");
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
assertThatThrownBy(() -> runner.run())
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("default credentials")
.hasMessageContaining("permanent");
verify(userRepository, never()).save(org.mockito.ArgumentMatchers.any());
}
@Test
void refuses_to_seed_when_password_is_default_and_profile_is_not_dev() throws Exception {
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
ReflectionTestUtils.setField(initializer, "adminPassword", UserDataInitializer.DEFAULT_ADMIN_PASSWORD);
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
assertThatThrownBy(() -> runner.run())
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("default credentials");
}
@Test
void allows_seed_when_both_values_are_set_and_profile_is_not_dev() throws Exception {
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
when(groupRepository.findByName("Administrators")).thenReturn(Optional.empty());
when(groupRepository.save(any(UserGroup.class))).thenAnswer(inv -> inv.getArgument(0));
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
ReflectionTestUtils.setField(initializer, "adminPassword", "a-real-strong-password");
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
runner.run();
verify(userRepository).save(any(AppUser.class));
}
@Test
void allows_seed_with_defaults_when_profile_is_dev() throws Exception {
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
when(groupRepository.findByName("Administrators")).thenReturn(Optional.empty());
when(groupRepository.save(any(UserGroup.class))).thenAnswer(inv -> inv.getArgument(0));
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(true);
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
ReflectionTestUtils.setField(initializer, "adminEmail", UserDataInitializer.DEFAULT_ADMIN_EMAIL);
ReflectionTestUtils.setField(initializer, "adminPassword", UserDataInitializer.DEFAULT_ADMIN_PASSWORD);
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
runner.run();
verify(userRepository).save(any(AppUser.class));
}
@Test
void does_not_check_defaults_when_admin_already_exists() throws Exception {
AppUser existing = AppUser.builder()
.email("someone@example.com")
.password("$2a$10$stub")
.build();
when(userRepository.findByEmail(anyString())).thenReturn(Optional.of(existing));
ReflectionTestUtils.setField(initializer, "adminEmail", UserDataInitializer.DEFAULT_ADMIN_EMAIL);
ReflectionTestUtils.setField(initializer, "adminPassword", UserDataInitializer.DEFAULT_ADMIN_PASSWORD);
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
runner.run();
verify(userRepository, never()).save(org.mockito.ArgumentMatchers.any());
// Importantly, no IllegalStateException — re-deploys must not panic over
// historical default-seeded data they cannot retroactively fix.
}
@Test
void reuses_existing_Administrators_group_when_seeding_a_new_admin() throws Exception {
// Setup: admin user does not exist, but the Administrators group does
// (e.g. previous boot seeded the group then failed; operator deleted
// the bad user row to retry with a corrected APP_ADMIN_USERNAME). The
// re-seed must reuse the group, not blind-INSERT a duplicate. See #518.
UserGroup existingGroup = UserGroup.builder()
.name("Administrators")
.build();
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
when(groupRepository.findByName("Administrators")).thenReturn(Optional.of(existingGroup));
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
ReflectionTestUtils.setField(initializer, "adminPassword", "a-real-strong-password");
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
runner.run();
// Group must not be re-inserted — that would violate user_groups_name_key.
verify(groupRepository, never()).save(any(UserGroup.class));
// But the admin user IS created, with the existing group attached.
org.mockito.ArgumentCaptor<AppUser> captor = org.mockito.ArgumentCaptor.forClass(AppUser.class);
verify(userRepository).save(captor.capture());
assertThat(captor.getValue().getGroups()).containsExactly(existingGroup);
}
@Test
void creates_Administrators_group_when_seeding_admin_on_a_fresh_database() throws Exception {
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
when(groupRepository.findByName("Administrators")).thenReturn(Optional.empty());
when(groupRepository.save(any(UserGroup.class))).thenAnswer(inv -> inv.getArgument(0));
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
ReflectionTestUtils.setField(initializer, "adminPassword", "a-real-strong-password");
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
runner.run();
// Group should be inserted exactly once.
verify(groupRepository).save(any(UserGroup.class));
verify(userRepository).save(any(AppUser.class));
}
}

View File

@@ -1,95 +0,0 @@
package org.raddatz.familienarchiv.user;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.beans.factory.config.YamlPropertiesFactoryBean;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
import org.springframework.core.env.PropertiesPropertySource;
import org.springframework.core.io.ClassPathResource;
import java.lang.reflect.Field;
import java.util.Properties;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Pins the admin-seed property key contract. {@code UserDataInitializer} reads
* {@code @Value("${app.admin.email:...}")} and {@code @Value("${app.admin.password:...}")}.
* The yaml MUST expose those exact keys, not e.g. {@code app.admin.username}, or
* the env vars {@code APP_ADMIN_USERNAME} / {@code APP_ADMIN_PASSWORD} are
* silently ignored and the admin user gets seeded with the hardcoded defaults.
*
* <p>Discovered as a HIGH bug during the production-deploy bootstrap (#513): on
* first deploy the prod admin password is permanently locked to whatever ends
* up in the database, so a key-name mismatch would lock prod to the dev defaults
* {@code admin@familyarchive.local} / {@code admin123}.
*
* <p>No Spring context — Binder reads application.yaml directly.
*/
class AdminSeedPropertyKeyTest {
@Test
void admin_email_key_binds_from_yaml() {
Binder binder = binderFromApplicationYaml();
String email = binder.bind("app.admin.email", String.class)
.orElseThrow(() -> new AssertionError(
"app.admin.email is missing from application.yaml. "
+ "UserDataInitializer reads this exact key; if the yaml uses "
+ "a different name (e.g. 'username'), the env var "
+ "APP_ADMIN_USERNAME is silently ignored."));
assertThat(email)
.as("app.admin.email must resolve from APP_ADMIN_USERNAME or its default")
.isNotBlank();
}
@Test
void admin_password_key_binds_from_yaml() {
Binder binder = binderFromApplicationYaml();
String password = binder.bind("app.admin.password", String.class)
.orElseThrow(() -> new AssertionError(
"app.admin.password is missing from application.yaml. "
+ "UserDataInitializer reads this exact key."));
assertThat(password)
.as("app.admin.password must resolve from APP_ADMIN_PASSWORD or its default")
.isNotBlank();
}
@Test
void userDataInitializer_reads_app_admin_email_not_username() throws NoSuchFieldException {
// Pin the Java side too: a future rename of the @Value placeholder
// (e.g. back to `${app.admin.username:...}`) would silently break the
// binding while the yaml-side assertions above still pass. See #513.
Field field = UserDataInitializer.class.getDeclaredField("adminEmail");
Value annotation = field.getAnnotation(Value.class);
assertThat(annotation)
.as("UserDataInitializer.adminEmail must be @Value-annotated")
.isNotNull();
assertThat(annotation.value())
.as("UserDataInitializer must read app.admin.email — not username or any other key")
.startsWith("${app.admin.email:");
}
@Test
void userDataInitializer_reads_app_admin_password() throws NoSuchFieldException {
Field field = UserDataInitializer.class.getDeclaredField("adminPassword");
Value annotation = field.getAnnotation(Value.class);
assertThat(annotation).isNotNull();
assertThat(annotation.value())
.as("UserDataInitializer must read app.admin.password")
.startsWith("${app.admin.password:");
}
private Binder binderFromApplicationYaml() {
YamlPropertiesFactoryBean yaml = new YamlPropertiesFactoryBean();
yaml.setResources(new ClassPathResource("application.yaml"));
Properties props = yaml.getObject();
assertThat(props).as("application.yaml must be on the classpath").isNotNull();
return new Binder(ConfigurationPropertySources.from(
new PropertiesPropertySource("application", props)));
}
}

View File

@@ -35,15 +35,4 @@ class AppUserTest {
.count();
assertThat(distinct).isGreaterThan(1);
}
@Test
void computeColor_returnsValidPaletteColorForIntegerMinValueHash() {
// UUID "80000000-0000-0000-0000-000000000000" has hashCode() == Integer.MIN_VALUE.
// Math.abs(Integer.MIN_VALUE) overflows back to Integer.MIN_VALUE (negative), making
// Math.abs(hashCode()) % n unsafe for palette sizes that don't evenly divide MIN_VALUE.
// Math.floorMod eliminates this edge case entirely.
UUID minHashId = UUID.fromString("80000000-0000-0000-0000-000000000000");
assertThat(minHashId.hashCode()).isEqualTo(Integer.MIN_VALUE);
assertThat(EXPECTED_PALETTE).contains(AppUser.computeColor(minHashId));
}
}
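The edge case as two worked assertions (plain JDK arithmetic, not project code):

// Two's complement: |Integer.MIN_VALUE| does not fit in an int, so abs overflows.
assertThat(Math.abs(Integer.MIN_VALUE)).isEqualTo(Integer.MIN_VALUE); // still negative
assertThat(Math.floorMod(Integer.MIN_VALUE, 7)).isEqualTo(5);         // always in [0, 7)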

View File

@@ -20,13 +20,10 @@ import org.springframework.security.test.context.support.WithMockUser;
import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.test.web.servlet.MockMvc;
import org.mockito.ArgumentCaptor;
import java.time.LocalDateTime;
import java.util.List;
import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -150,30 +147,6 @@ class InviteControllerTest {
.andExpect(jsonPath("$.label").value("Für Familie"));
}
@Test
@WithMockUser(username = "admin@test.com", authorities = {"ADMIN_USER"})
void createInvite_forwardsGroupIdsToService() throws Exception {
UUID groupId = UUID.randomUUID();
AppUser admin = AppUser.builder().id(UUID.randomUUID()).email("admin@test.com").build();
when(userService.findByEmail("admin@test.com")).thenReturn(admin);
InviteToken savedToken = InviteToken.builder()
.id(UUID.randomUUID()).code("ABCDE12345").useCount(0).build();
when(inviteService.createInvite(any(), eq(admin))).thenReturn(savedToken);
when(inviteService.toListItemDTO(any(), anyString()))
.thenReturn(makeInviteDTO(savedToken.getId(), "ABCDE12345"));
String body = "{\"groupIds\":[\"" + groupId + "\"]}";
mockMvc.perform(post("/api/invites")
.contentType(MediaType.APPLICATION_JSON)
.content(body))
.andExpect(status().isCreated());
ArgumentCaptor<CreateInviteRequest> captor = ArgumentCaptor.forClass(CreateInviteRequest.class);
verify(inviteService).createInvite(captor.capture(), eq(admin));
assertThat(captor.getValue().getGroupIds()).containsExactly(groupId);
}
// ─── DELETE /api/invites/{id} ─────────────────────────────────────────────
@Test

View File

@@ -156,35 +156,6 @@ class InviteServiceTest {
assertThat(result.getGroupIds()).contains(g.getId());
}
@Test
void createInvite_throwsGroupNotFound_whenSubmittedGroupIdDoesNotExist() {
UUID unknownGroupId = UUID.randomUUID();
when(userService.findGroupsByIds(anyList())).thenReturn(List.of());
CreateInviteRequest req = new CreateInviteRequest();
req.setGroupIds(List.of(unknownGroupId));
assertThatThrownBy(() -> inviteService.createInvite(req, admin))
.isInstanceOf(DomainException.class)
.extracting(e -> ((DomainException) e).getCode())
.isEqualTo(ErrorCode.GROUP_NOT_FOUND);
}
@Test
void createInvite_doesNotThrowGroupNotFound_whenDuplicateGroupIdsSubmitted() {
UUID groupId = UUID.randomUUID();
UserGroup group = UserGroup.builder().id(groupId).name("Familie").build();
when(inviteTokenRepository.findByCode(anyString())).thenReturn(Optional.empty());
when(userService.findGroupsByIds(anyList())).thenReturn(List.of(group));
when(inviteTokenRepository.save(any())).thenAnswer(inv -> inv.getArgument(0));
CreateInviteRequest req = new CreateInviteRequest();
req.setGroupIds(List.of(groupId, groupId)); // same UUID submitted twice
// before deduplication: size(groups)==1 != size(submitted)==2 → false GROUP_NOT_FOUND
assertThatCode(() -> inviteService.createInvite(req, admin)).doesNotThrowAnyException();
}
// ─── redeemInvite ─────────────────────────────────────────────────────────
@Test

View File

@@ -1,78 +0,0 @@
package org.raddatz.familienarchiv.user;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.raddatz.familienarchiv.PostgresContainerConfig;
import org.raddatz.familienarchiv.config.FlywayConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.jdbc.test.autoconfigure.AutoConfigureTestDatabase;
import org.springframework.boot.data.jpa.test.autoconfigure.DataJpaTest;
import org.springframework.context.annotation.Import;
import java.time.LocalDateTime;
import java.util.Set;
import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
@DataJpaTest
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
@Import({PostgresContainerConfig.class, FlywayConfig.class})
class InviteTokenRepositoryIntegrationTest {
@Autowired InviteTokenRepository inviteTokenRepository;
@Autowired UserGroupRepository userGroupRepository;
@Autowired AppUserRepository appUserRepository;
private UserGroup group;
private AppUser admin;
@BeforeEach
void setUp() {
inviteTokenRepository.deleteAll();
userGroupRepository.deleteAll();
appUserRepository.deleteAll();
admin = appUserRepository.save(AppUser.builder().email("admin@test.com").password("pw").build());
group = userGroupRepository.save(UserGroup.builder().name("Familie").build());
}
// ─── existsActiveWithGroupId ──────────────────────────────────────────────
@Test
void existsActiveWithGroupId_returnsTrueForActiveInviteLinkedToGroup() {
inviteTokenRepository.save(token(t -> t));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isTrue();
}
@Test
void existsActiveWithGroupId_returnsFalseWhenInviteIsRevoked() {
inviteTokenRepository.save(token(t -> t.revoked(true)));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
}
@Test
void existsActiveWithGroupId_returnsFalseWhenInviteIsExpired() {
inviteTokenRepository.save(token(t -> t.expiresAt(LocalDateTime.now().minusDays(1))));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
}
@Test
void existsActiveWithGroupId_returnsFalseWhenInviteIsExhausted() {
inviteTokenRepository.save(token(t -> t.maxUses(1).useCount(1)));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
}
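// A plausible shape for the repository query these four tests pin down — a
// sketch, not the committed code. "Active" = not revoked, not expired, not
// exhausted; groupIds is an element collection, hence the JPQL join:
//   @Query("""
//       select count(t) > 0 from InviteToken t join t.groupIds g
//       where g = :groupId
//         and t.revoked = false
//         and (t.expiresAt is null or t.expiresAt > current_timestamp)
//         and (t.maxUses is null or t.useCount < t.maxUses)
//       """)
//   boolean existsActiveWithGroupId(@Param("groupId") UUID groupId);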
// ─── helpers ─────────────────────────────────────────────────────────────
private InviteToken token(UnaryOperator<InviteToken.InviteTokenBuilder> customizer) {
InviteToken.InviteTokenBuilder builder = InviteToken.builder()
.code(UUID.randomUUID().toString().replace("-", "").substring(0, 10))
.groupIds(new HashSet<>(Set.of(group.getId())))
.createdBy(admin);
return customizer.apply(builder).build();
}
}

View File

@@ -36,7 +36,6 @@ class UserServiceTest {
@Mock AppUserRepository userRepository;
@Mock UserGroupRepository groupRepository;
@Mock InviteTokenRepository inviteTokenRepository;
@Mock PasswordEncoder passwordEncoder;
@Mock AuditService auditService;
@InjectMocks UserService userService;
@@ -903,41 +902,4 @@ class UserServiceTest {
assertThat(result.getName()).isEqualTo("Familie");
assertThat(result.getPermissions()).containsExactlyInAnyOrder("READ_ALL", "WRITE_ALL");
}
// ─── deleteGroup ──────────────────────────────────────────────────────────
@Test
void deleteGroup_throwsConflict_whenActiveInviteReferencesGroup() {
UUID groupId = UUID.randomUUID();
when(inviteTokenRepository.existsActiveWithGroupId(groupId)).thenReturn(true);
assertThatThrownBy(() -> userService.deleteGroup(groupId))
.isInstanceOf(DomainException.class)
.extracting(e -> ((DomainException) e).getCode())
.isEqualTo(ErrorCode.GROUP_HAS_ACTIVE_INVITES);
}
@Test
void deleteGroup_deletesGroup_whenNoActiveInviteReferencesGroup() {
UUID groupId = UUID.randomUUID();
when(inviteTokenRepository.existsActiveWithGroupId(groupId)).thenReturn(false);
userService.deleteGroup(groupId);
verify(groupRepository).deleteById(groupId);
}
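// Sketch of the guarded delete exercised by the two tests above (the service
// method itself is not shown in this diff; its shape is assumed):
//   public void deleteGroup(UUID groupId) {
//       if (inviteTokenRepository.existsActiveWithGroupId(groupId)) {
//           throw new DomainException(ErrorCode.GROUP_HAS_ACTIVE_INVITES);
//       }
//       groupRepository.deleteById(groupId);
//   }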
@Test
void createGroup_withNullPermissions_savesGroupWithEmptyPermissionSet() {
GroupDTO dto = new GroupDTO();
dto.setName("Leser");
dto.setPermissions(null);
UserGroup saved = UserGroup.builder().id(UUID.randomUUID()).name("Leser").build();
when(groupRepository.save(any())).thenReturn(saved);
userService.createGroup(dto);
verify(groupRepository).save(argThat(g -> g.getPermissions() != null && g.getPermissions().isEmpty()));
}
}

View File

@@ -1,2 +0,0 @@
logging.level.root=WARN
logging.level.org.raddatz=INFO

View File

@@ -1,246 +0,0 @@
# Production / staging Docker Compose for Familienarchiv.
#
# This is a self-contained file (not an overlay over docker-compose.yml).
# All services for the prod stack live here. Environment isolation is
# achieved via the docker compose project name:
#
# production: docker compose -f docker-compose.prod.yml -p archiv-production ...
# staging: docker compose -f docker-compose.prod.yml -p archiv-staging --profile staging ...
#
# Volumes, networks and containers are namespaced by the project name,
# so the two environments cohabit cleanly on the same host.
#
# Required env vars (provided by .env.production / .env.staging in CI):
# TAG image tag (release tag or "nightly")
# PORT_BACKEND, PORT_FRONTEND host-side ports (bound to 127.0.0.1 only)
# APP_DOMAIN e.g. archiv.raddatz.cloud / staging.raddatz.cloud
# POSTGRES_PASSWORD Postgres password
# MINIO_PASSWORD MinIO root password (admin operations only)
# MINIO_APP_PASSWORD MinIO application service-account password
# (least-privilege scope: archive bucket only)
# OCR_TRAINING_TOKEN token guarding ocr-service /train endpoint
# APP_ADMIN_USERNAME seeded admin email (e.g. admin@archiv.raddatz.cloud)
# APP_ADMIN_PASSWORD seeded admin password — CRITICAL: locked in on
# first deploy because UserDataInitializer only
# creates the account if the email does not exist
# MAIL_HOST, MAIL_PORT, SMTP relay (production only; staging uses mailpit)
# MAIL_USERNAME, MAIL_PASSWORD
# APP_MAIL_FROM sender address (e.g. noreply@raddatz.cloud)
# IMPORT_HOST_DIR absolute host path holding ONLY the ODS
# spreadsheet and PDFs for /admin/system mass
# import — mounted read-only at /import inside
# the backend. Compose refuses to start when
# this var is unset, so staging and prod cannot
# accidentally share an import source. Must be
# readable by the backend container's UID
# (currently root via the OpenJDK image — any
# world-readable directory works).
networks:
archiv-net:
driver: bridge
volumes:
postgres-data:
minio-data:
ocr-models:
ocr-cache:
services:
db:
image: postgres:16-alpine
restart: unless-stopped
environment:
POSTGRES_USER: archiv
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: archiv
volumes:
- postgres-data:/var/lib/postgresql/data
networks:
- archiv-net
healthcheck:
test: ["CMD-SHELL", "pg_isready -U archiv -d archiv"]
interval: 10s
timeout: 5s
retries: 5
minio:
# Pinned MinIO release for reproducible deploys. Bumped manually until
# Renovate is bootstrapped for these production images (see follow-up issue).
image: minio/minio:RELEASE.2025-02-28T09-55-16Z
restart: unless-stopped
command: server /data --console-address ":9001"
environment:
MINIO_ROOT_USER: archiv
MINIO_ROOT_PASSWORD: ${MINIO_PASSWORD}
volumes:
- minio-data:/data
networks:
- archiv-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
# Idempotent bucket bootstrap + service-account creation.
# Runs once per `docker compose up` and exits 0. The entrypoint is
# extracted to infra/minio/bootstrap.sh so the (non-trivial) idempotent
# logic is readable, reviewable, and unit-testable as a script rather
# than YAML-escaped shell.
create-buckets:
# Custom image bakes bootstrap.sh in at build time. A bind-mount fails on
# the Docker-out-of-Docker production runner because the host daemon
# resolves the relative path against the host filesystem, not the
# runner container's CWD. See #506 + infra/minio/Dockerfile.
build:
context: ./infra/minio
# Declare one-shot intent so `docker compose up -d --wait` treats
# exited(0) as success rather than "not running, fail". Pair with
# backend's `service_completed_successfully` dependency below. See #510.
restart: "no"
depends_on:
minio:
condition: service_healthy
networks:
- archiv-net
environment:
MINIO_PASSWORD: ${MINIO_PASSWORD}
MINIO_APP_PASSWORD: ${MINIO_APP_PASSWORD}
# Dev-only mail catcher; gated behind the staging profile so production
# never starts it. Staging workflow runs with `--profile staging`.
mailpit:
# Pinned for reproducibility; bumped manually until Renovate is bootstrapped.
image: axllent/mailpit:v1.29.7
restart: unless-stopped
profiles: ["staging"]
networks:
- archiv-net
healthcheck:
# TCP-port open check via BusyBox `nc`. The previous wget-based probe
# introduced a non-obvious binary dependency on the mailpit image; a
# future tag that ships without wget would silently disable the
# healthcheck. `nc` is part of BusyBox in the upstream image.
test: ["CMD-SHELL", "nc -z localhost 8025 || exit 1"]
interval: 10s
timeout: 5s
retries: 5
ocr-service:
build:
context: ./ocr-service
restart: unless-stopped
expose:
- "8000"
# Surya OCR loads ~5GB of transformer models at startup; first request
# triggers a further ~1GB Kraken model download into ocr-cache.
# CX42+ (16 GB RAM) honours the default. On a CX32 (8 GB) override with
# OCR_MEM_LIMIT=6g (slower first-request, fits the host).
mem_limit: ${OCR_MEM_LIMIT:-12g}
memswap_limit: ${OCR_MEM_LIMIT:-12g}
volumes:
- ocr-models:/app/models
- ocr-cache:/root/.cache
environment:
KRAKEN_MODEL_PATH: /app/models/german_kurrent.mlmodel
TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
OCR_CONFIDENCE_THRESHOLD: "0.3"
OCR_CONFIDENCE_THRESHOLD_KURRENT: "0.5"
# SSRF allowlist pinned explicitly to the internal MinIO hostname.
# In prod the OCR service only fetches PDFs from MinIO over the
# docker network; localhost/127.0.0.1 are dev-only sources and
# must NOT be reachable here. Do not widen to `*`.
ALLOWED_PDF_HOSTS: "minio"
networks:
- archiv-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 10s
timeout: 5s
retries: 12
start_period: 120s
backend:
image: familienarchiv/backend:${TAG:-nightly}
build:
context: ./backend
restart: unless-stopped
depends_on:
db:
condition: service_healthy
minio:
condition: service_healthy
ocr-service:
condition: service_healthy
# Gate startup on the bucket bootstrap. Without this, backend
# starts in parallel with create-buckets and may race the policy
# bind. Also tells compose's `up -d --wait` that create-buckets
# is a one-shot that must complete successfully. See #510.
create-buckets:
condition: service_completed_successfully
# Bound to localhost only — Caddy fronts external traffic.
ports:
- "127.0.0.1:${PORT_BACKEND}:8080"
# Host path holding the ODS spreadsheet + PDFs for the mass-import endpoint.
# Read-only; MassImportService only reads (Files.list / Files.walk on /import).
# Required — no default — so staging and prod cannot accidentally share an
# import source. CI workflows pin this per-env (see .gitea/workflows/).
volumes:
- ${IMPORT_HOST_DIR:?Set IMPORT_HOST_DIR to a host path holding the mass-import payload (ODS + PDFs). See docs/DEPLOYMENT.md.}:/import:ro
environment:
SPRING_DATASOURCE_URL: jdbc:postgresql://db:5432/archiv
SPRING_DATASOURCE_USERNAME: archiv
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD}
# Application uses the bucket-scoped service account, not MinIO root.
S3_ENDPOINT: http://minio:9000
S3_ACCESS_KEY: archiv-app
S3_SECRET_KEY: ${MINIO_APP_PASSWORD}
S3_BUCKET_NAME: familienarchiv
S3_REGION: us-east-1
# No SPRING_PROFILES_ACTIVE — base application.yaml is production-ready
# (Swagger disabled, show-sql off, open-in-view false).
APP_BASE_URL: https://${APP_DOMAIN}
APP_ADMIN_USERNAME: ${APP_ADMIN_USERNAME}
APP_ADMIN_PASSWORD: ${APP_ADMIN_PASSWORD}
APP_OCR_BASE_URL: http://ocr-service:8000
APP_OCR_TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
MAIL_HOST: ${MAIL_HOST}
MAIL_PORT: ${MAIL_PORT:-587}
MAIL_USERNAME: ${MAIL_USERNAME:-}
MAIL_PASSWORD: ${MAIL_PASSWORD:-}
APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud}
SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true}
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true}
networks:
- archiv-net
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
interval: 15s
timeout: 5s
retries: 10
start_period: 30s
frontend:
image: familienarchiv/frontend:${TAG:-nightly}
build:
context: ./frontend
target: production
restart: unless-stopped
depends_on:
backend:
condition: service_healthy
ports:
- "127.0.0.1:${PORT_FRONTEND}:3000"
environment:
# SSR fetches go inside the docker network; clients hit https://${APP_DOMAIN}
API_INTERNAL_URL: http://backend:8080
ORIGIN: https://${APP_DOMAIN}
networks:
- archiv-net
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:3000/login >/dev/null 2>&1 || exit 1"]
interval: 15s
timeout: 5s
retries: 10
start_period: 20s

View File

@@ -13,7 +13,7 @@ services:
ports:
- "${PORT_DB}:5432"
networks:
- archiv-net
- archive-net
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
interval: 5s
@@ -35,7 +35,7 @@ services:
- "${PORT_MINIO_API}:9000" # API Port
- "${PORT_MINIO_CONSOLE}:9001" # Web-Oberfläche
networks:
- archiv-net
- archive-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
@@ -56,7 +56,7 @@ services:
exit 0;
"
networks:
- archiv-net
- archive-net
# --- Mail catcher: Mailpit (dev only) ---
# Catches all outgoing emails and displays them in a web UI.
@@ -69,7 +69,7 @@ services:
- "${PORT_MAILPIT_UI:-8025}:8025" # Web UI
- "${PORT_MAILPIT_SMTP:-1025}:1025" # SMTP
networks:
- archiv-net
- archive-net
# --- OCR: Python microservice (Surya + Kraken) ---
# Single-node only: OCR training reloads the model in-process after each run.
@@ -99,7 +99,7 @@ services:
OCR_CLAHE_TILE_SIZE: "8" # CLAHE tile grid size (NxN tiles per page)
OCR_MAX_CACHED_MODELS: "2" # LRU cache; each model ~500 MB, so 2 = ~1 GB resident
networks:
- archiv-net
- archive-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 10s
@@ -150,7 +150,7 @@ services:
ports:
- "${PORT_BACKEND}:8080"
networks:
- archiv-net
- archive-net
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
interval: 15s
@@ -163,7 +163,6 @@ services:
build:
context: ./frontend
dockerfile: Dockerfile
target: development # Dockerfile is multi-stage; default would be the production stage
container_name: archive-frontend
restart: unless-stopped
depends_on:
@@ -185,10 +184,10 @@ services:
ports:
- "${PORT_FRONTEND}:5173"
networks:
- archiv-net
- archive-net
networks:
archiv-net:
archive-net:
driver: bridge
volumes:

View File

@@ -63,7 +63,7 @@ Members of the cross-cutting layer have no entity of their own, no user-facing C
| `audit` | Append-only event store (`audit_log`) for all domain mutations. Feeds the activity feed and Family Pulse dashboard. | Consumed by 5+ domains; no user-facing CRUD of its own |
| `config` | Infrastructure bean definitions: `MinioConfig`, `AsyncConfig`, `WebConfig` | Framework infra; no business logic |
| `dashboard` | Stats aggregation for the admin dashboard and Family Pulse widget | Aggregates from 3+ domains; no owned entities |
| `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service. Adding a new `ErrorCode` requires matching updates in `frontend/src/lib/shared/errors.ts` and all three `messages/*.json` locale files. |
| `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service |
| `filestorage` | `FileService` — MinIO/S3 upload, download, presigned-URL generation | Generic service; consumed by `document` and `ocr` |
| `importing` | `MassImportService` — async ODS/Excel batch import | Orchestrates across `person`, `tag`, `document` |
| `security` | `SecurityConfig`, `Permission` enum, `@RequirePermission` annotation, `PermissionAspect` (AOP) | Framework infra; enforced globally across all controllers |

View File

@@ -27,22 +27,20 @@ This doc is the Day-1 checklist and operational reference. It links to the canon
```mermaid
graph TD
Browser -->|HTTPS| Caddy["Caddy (TLS termination)"]
Caddy -->|HTTP :3000| Frontend["Web Frontend\nSvelteKit Node adapter"]
Caddy -->|HTTP :5173| Frontend["Web Frontend\nSvelteKit / Node.js"]
Caddy -->|HTTP :8080| Backend["API Backend\nSpring Boot / Jetty :8080"]
Backend -->|JDBC :5432| DB[(PostgreSQL 16)]
Backend -->|S3 API :9000| MinIO[(MinIO)]
Backend -->|S3 API :9000| MinIO[(MinIO / Hetzner OBS)]
Backend -->|HTTP :8000 internal| OCR["OCR Service\nPython FastAPI"]
OCR -->|presigned URL| MinIO
Caddy -->|SSE proxy_pass| Backend
Browser -->|SSE direct| Backend
```
**Key facts:**
- Caddy terminates TLS and reverse-proxies to frontend (`:3000`) and backend (`:8080`). The Caddyfile is committed at [`infra/caddy/Caddyfile`](../infra/caddy/Caddyfile) and is installed on the host as `/etc/caddy/Caddyfile` (symlink).
- The host binds all docker-published ports to `127.0.0.1` only; Caddy is the sole external entry point.
- The OCR service has **no published port** — reachable only on the internal Docker network from the backend.
- SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not.
- The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy.
- Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001).
- Caddy terminates TLS and reverse-proxies to frontend and backend. See the Caddyfile in [`docs/infrastructure/production-compose.md`](infrastructure/production-compose.md).
- The OCR service has **no external port** — reachable only on the internal Docker network from the backend.
- SSE notifications go directly backend → browser (not via the SvelteKit SSR layer).
- Management port 8081 (Spring Actuator / Prometheus scrape) is internal only — the Caddy config blocks `/actuator/*` externally.
### OCR memory requirements
@@ -54,23 +52,19 @@ The OCR service requires significant RAM for model loading. The dev compose sets
| Hetzner CX32 | 8 GB | 6 GB | Accept reduced batch sizes and slower throughput |
| Hetzner CX22 | 4 GB | — | Disable the OCR service (`profiles: [ocr]`); run OCR on demand only |
A CX32 cannot honour the default `mem_limit: 12g` — set the `OCR_MEM_LIMIT=6g` env var (in `.env.production` / `.env.staging`, or as a Gitea secret consumed by the workflow) before deploying on a CX32. The prod compose interpolates this var with a 12g default.
A CX32 cannot honour a `mem_limit: 12g` — set it to `6g` in the prod overlay or use CX42.
### Dev vs production differences
| Concern | Dev (`docker-compose.yml`) | Prod (`docker-compose.prod.yml`) |
| Concern | Dev compose | Prod overlay |
|---|---|---|
| MinIO image tag | `minio/minio:latest` | Pinned `minio/minio:RELEASE.…` |
| Data persistence | Bind mounts `./data/postgres`, `./data/minio` | Named Docker volumes (`postgres-data`, `minio-data`) |
| MinIO credentials for backend | Root user/password | Service account `archiv-app` with bucket-scoped rights |
| Bucket creation | `create-buckets` helper | Same helper, plus service-account bootstrap on every up |
| Spring profile | `dev,e2e` (Swagger + e2e overrides) | unset — base `application.yaml` is production-ready |
| Mail | Mailpit (local catcher) | Real SMTP (production) / Mailpit via `profiles: [staging]` (staging) |
| Frontend image | Dev server, `target: development`, port 5173 | Node adapter, `target: production`, port 3000 |
| Host port binding | All published | Bound to `127.0.0.1` only; Caddy is the front door |
| Deploy method | `docker compose up -d` (manual) | Gitea Actions: `nightly.yml` (staging, cron) and `release.yml` (production, on `v*` tag) — both use `up -d --wait` |
| MinIO image tag | `minio/minio:latest` (unpinned) | Pinned in prod overlay |
| Data persistence | Bind mounts `./data/postgres`, `./data/minio` | Named Docker volumes |
| Bucket creation | `create-buckets` helper container | Pre-created in Hetzner console |
| Spring profile | `dev,e2e` (enables OpenAPI + Swagger UI) | `prod` |
| Mail | Mailpit (local catcher) | Real SMTP |
Full prod compose: [`docker-compose.prod.yml`](../docker-compose.prod.yml). Workflow files: [`.gitea/workflows/nightly.yml`](../.gitea/workflows/nightly.yml), [`.gitea/workflows/release.yml`](../.gitea/workflows/release.yml).
Full prod overlay: [`docs/infrastructure/production-compose.md`](infrastructure/production-compose.md).
---
@@ -97,7 +91,6 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| `APP_BASE_URL` | Public-facing URL for email links | `http://localhost:3000` | YES (prod) | — |
| `APP_OCR_BASE_URL` | Internal URL of the OCR service | — | YES | — |
| `APP_OCR_TRAINING_TOKEN` | Secret token for OCR training endpoints | — | YES (prod) | YES |
| `IMPORT_HOST_DIR` | Absolute host path holding the ODS spreadsheet + PDFs for the `/admin/system` mass-import card. Mounted read-only at `/import` inside the backend (compose-only — backend reads via `app.import.dir`). Compose refuses to start when unset, so staging and prod cannot accidentally share the source. Convention: `/srv/familienarchiv-staging/import` and `/srv/familienarchiv-production/import` | — | YES (prod compose) | — |
| `MAIL_HOST` | SMTP host | `mailpit` (dev) | YES (prod) | — |
| `MAIL_PORT` | SMTP port | `1025` (dev) | YES (prod) | — |
| `MAIL_USERNAME` | SMTP username | — | YES (prod) | YES |
@@ -119,10 +112,9 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| Variable | Purpose | Default | Required? | Sensitive? |
|---|---|---|---|---|
| `MINIO_ROOT_USER` | MinIO root username (dev compose only — prod compose hardcodes `archiv`) | `minio_admin` | YES (dev) | — |
| `MINIO_ROOT_PASSWORD` / `MINIO_PASSWORD` | MinIO root password. **Used only by the `mc admin` bootstrap in prod, never by the backend.** | `change-me` | YES | YES |
| `MINIO_APP_PASSWORD` | Password for the `archiv-app` service account that the backend uses. Bucket-scoped via `readwrite` policy on `familienarchiv`. Bootstrapped by `create-buckets`. | — | YES (prod) | YES |
| `MINIO_DEFAULT_BUCKETS` | Bucket name (dev compose only — prod compose hardcodes `familienarchiv`) | `archive-documents` | YES (dev) | — |
| `MINIO_ROOT_USER` | MinIO root username | `minio_admin` | YES | — |
| `MINIO_ROOT_PASSWORD` | MinIO root password | `change-me` | YES | YES |
| `MINIO_DEFAULT_BUCKETS` | Bucket name | `archive-documents` | YES | — |
### OCR service
@@ -132,105 +124,53 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| `ALLOWED_PDF_HOSTS` | SSRF protection — comma-separated list of allowed PDF source hosts. **Do not widen to `*`** | `minio,localhost,127.0.0.1` | YES | — |
| `KRAKEN_MODEL_PATH` | Directory containing Kraken HTR models (populated by `download-kraken-models.sh`) | `/app/models/` | — | — |
| `BLLA_MODEL_PATH` | Kraken baseline layout analysis model path | `/app/models/blla.mlmodel` | — | — |
| `OCR_MEM_LIMIT` | Container memory cap for ocr-service in `docker-compose.prod.yml`. Set to `6g` on CX32 hosts; leave unset on CX42+ to use the 12g default | `12g` (prod compose default) | — | — |
---
## 3. Bootstrap from scratch
Production and staging deploy via Gitea Actions (`release.yml` on `v*` tag, `nightly.yml` on cron). The server itself only needs to host Caddy, Docker, and the runner — the workflows handle the rest.
> Full VPS provisioning steps are in [`docs/infrastructure/production-compose.md`](infrastructure/production-compose.md). This section covers the sequence and the security-critical steps.
### 3.1 Server one-time setup
### Security checklist — complete before first boot
> ⚠️ **These defaults ship in `.env.example` and `application.yaml`. Change them or you will have an insecure installation.**
- [ ] Set `APP_ADMIN_PASSWORD` (default: `admin123` — change before starting the backend)
- [ ] Set `APP_ADMIN_USERNAME` if you want a non-default admin login name (add to `.env` — not in `.env.example`)
- [ ] Rotate `POSTGRES_PASSWORD` from `change-me`
- [ ] Rotate `MINIO_ROOT_PASSWORD` from `change-me`
- [ ] Set a strong `APP_OCR_TRAINING_TOKEN` (backend) and the matching `TRAINING_TOKEN` (OCR service) — both must be the same value (`python3 -c "import secrets; print(secrets.token_hex(32))"`)
- [ ] Confirm `ALLOWED_PDF_HOSTS` is locked to your MinIO/S3 hostname — widening to `*` opens SSRF
- [ ] Set `SPRING_PROFILES_ACTIVE=prod` in the prod overlay (not `dev,e2e` — that exposes Swagger UI and `/v3/api-docs`)
- [ ] Use a dedicated MinIO service account for `S3_ACCESS_KEY` / `S3_SECRET_KEY`, not the root credentials
### Bootstrap sequence
```bash
# Base hardening
ufw default deny incoming && ufw allow 22/tcp && ufw allow 80/tcp && ufw allow 443/tcp && ufw enable
# /etc/ssh/sshd_config: PasswordAuthentication no, PermitRootLogin no
# 1. Copy and fill the env file
cp .env.example .env
# edit .env — complete the security checklist above first
# Install Caddy 2 (https://caddyserver.com/docs/install#debian-ubuntu-raspbian)
apt install caddy
# 2. (Production only) Create the MinIO / Hetzner OBS bucket in the console
# The dev compose has a create-buckets helper; production does not.
# Create the bucket named $MINIO_DEFAULT_BUCKETS with private access.
# Use the Caddyfile from the repo (replace path with the runner's clone target)
# CI DEPENDENCY: the nightly and release workflows run `systemctl reload caddy` to
# pick up committed Caddyfile changes. They find the file via this symlink — if it
# is absent or points elsewhere, the reload succeeds but serves stale config.
ln -sf /opt/familienarchiv/infra/caddy/Caddyfile /etc/caddy/Caddyfile
systemctl reload caddy
# 3. Start the stack (prod overlay — see docs/infrastructure/production-compose.md)
# docker-compose.prod.yml is NOT committed — create it from the guide above
docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d
# fail2ban — protect /api/auth/login from credential stuffing.
# Jail watches the Caddy JSON access log for 401 responses on
# /api/auth/login. The jail (maxretry=10 / findtime=10m / bantime=30m)
# and filter are committed under infra/fail2ban/ — symlink them in:
apt install fail2ban
ln -sf /opt/familienarchiv/infra/fail2ban/jail.d/familienarchiv.conf \
/etc/fail2ban/jail.d/familienarchiv.conf
ln -sf /opt/familienarchiv/infra/fail2ban/filter.d/familienarchiv-auth.conf \
/etc/fail2ban/filter.d/familienarchiv-auth.conf
systemctl reload fail2ban
# Verify after first deploy with:
# fail2ban-client status familienarchiv-auth
# fail2ban-regex /var/log/caddy/access.log familienarchiv-auth
# 4. Flyway migrations run automatically on backend start.
# Watch the backend log to confirm:
docker compose logs --follow --tail=100 backend
# Tailscale — used by the backup pipeline to reach heim-nas (follow-up issue)
curl -fsSL https://tailscale.com/install.sh | sh && tailscale up
# 5. Verify the stack is healthy
curl http://localhost:8080/actuator/health
# Expected: {"status":"UP"}
# Self-hosted Gitea runner — register against the repo with a runner token.
# This runner is assumed single-tenant: the deploy workflows write .env.*
# files to disk during execution (cleaned up unconditionally on completion).
# A multi-tenant runner would need to switch to stdin-piped env files.
# (See https://docs.gitea.com/usage/actions/quickstart for the register step.)
# 6. Open the app and log in with the admin credentials from .env
```
### 3.2 DNS records
```
archiv.raddatz.cloud A <server IP>
staging.raddatz.cloud A <server IP>
git.raddatz.cloud A <server IP>
```
### 3.3 Gitea secrets (Repo → Settings → Actions → Secrets)
| Secret | Used by | Notes |
|---|---|---|
| `PROD_POSTGRES_PASSWORD` | release.yml | strong unique password |
| `PROD_MINIO_PASSWORD` | release.yml | MinIO root password; used only at bootstrap |
| `PROD_MINIO_APP_PASSWORD` | release.yml | application service-account password |
| `PROD_OCR_TRAINING_TOKEN` | release.yml | `python3 -c "import secrets; print(secrets.token_hex(32))"` |
| `PROD_APP_ADMIN_USERNAME` | release.yml | e.g. `admin@archiv.raddatz.cloud` |
| `PROD_APP_ADMIN_PASSWORD` | release.yml | **⚠ locked permanently on first deploy** — see §3.5 |
| `STAGING_POSTGRES_PASSWORD` | nightly.yml | different from prod |
| `STAGING_MINIO_PASSWORD` | nightly.yml | different from prod |
| `STAGING_MINIO_APP_PASSWORD` | nightly.yml | different from prod |
| `STAGING_OCR_TRAINING_TOKEN` | nightly.yml | different from prod |
| `STAGING_APP_ADMIN_USERNAME` | nightly.yml | e.g. `admin@staging.raddatz.cloud` |
| `STAGING_APP_ADMIN_PASSWORD` | nightly.yml | locked on first staging deploy |
| `MAIL_HOST` | release.yml | SMTP relay hostname (prod only) |
| `MAIL_PORT` | release.yml | typically `587` |
| `MAIL_USERNAME` | release.yml | SMTP user |
| `MAIL_PASSWORD` | release.yml | SMTP password |
### 3.4 First deploy
```bash
# 1. Trigger nightly.yml manually (Repo → Actions → nightly → "Run workflow")
# Expected: docker compose up -d --wait succeeds for archiv-staging, then
# the workflow's "Smoke test deployed environment" step asserts:
# - https://staging.raddatz.cloud/login returns 200
# - HSTS header is present
# - /actuator/health returns 404 (defense-in-depth check)
# 2. (Optional) Re-verify manually
curl -I https://staging.raddatz.cloud/
# Expected: 200 (login page) with HSTS + X-Content-Type-Options headers
# 3. When staging looks healthy, push a v* tag to trigger release.yml
git tag v1.0.0 && git push origin v1.0.0
```
### 3.5 ⚠ Admin password is locked on first deploy
`UserDataInitializer` creates the admin user **only if the email does not exist**. The first successful deploy persists the admin password to the database. Changing `PROD_APP_ADMIN_PASSWORD` in Gitea secrets after that point has **no effect** — the secret is only consulted when the row is missing.
Before the first deploy: rotate `PROD_APP_ADMIN_PASSWORD` to a strong value. After the first deploy: change the admin password via the in-app account settings, not via the Gitea secret.
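The mechanics reduce to an existence check at startup — a minimal sketch, assuming a `findByEmail` finder and the `AppUser` builder used elsewhere in the test suite; the committed `UserDataInitializer` may differ in detail:
```java
// Runs once at backend startup. The env-var-backed secret is only consulted
// when the admin row is missing, so rotating the Gitea secret after the
// first deploy is a silent no-op.
void seedAdmin() {
    if (appUserRepository.findByEmail(adminUsername).isPresent()) {
        return; // existing row wins — the stored password, not the secret
    }
    appUserRepository.save(AppUser.builder()
            .email(adminUsername)
            .password(passwordEncoder.encode(adminPassword))
            .build());
}
```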
> **Do not use `docker-compose.ci.yml` locally** — it disables bind mounts that the dev workflow depends on.
---
@@ -284,23 +224,7 @@ docker exec -i archive-db psql -U ${POSTGRES_USER} ${POSTGRES_DB} < backup-YYYYM
### Planned — phase 5 of Production v1 milestone
Automated backup (nightly `pg_dump` + MinIO `mc mirror` over Tailscale to `heim-nas`) is a follow-up issue. Until that ships: **manual backups are the only recovery option.**
### Rollback
Each release tag corresponds to a docker image tag on the host daemon (built via DooD; no registry). Rolling back to a previous tag is one command:
```bash
TAG=v1.0.0 docker compose \
-f docker-compose.prod.yml \
-p archiv-production \
--env-file /opt/familienarchiv/.env.production \
up -d --wait --remove-orphans
```
If the rollback target image is no longer present on the host (host disk pruned, etc.), re-trigger `release.yml` for that tag from Gitea Actions UI — it rebuilds and redeploys.
**Flyway migrations are not auto-rolled-back.** If a release contained a destructive migration (drop column, rename table), a tag rollback brings the schema back to a previous app version but the data shape has already changed. For breaking schema changes, prefer a forward-only fix.
Automated backup (PostgreSQL WAL archiving + MinIO bucket replication) is planned in the Production v1 milestone phase 5. Until that ships: **manual backups are the only recovery option.**
---
@@ -333,18 +257,9 @@ bash scripts/download-kraken-models.sh
### Trigger a mass import (Excel/ODS)
**Dev:** drop the ODS spreadsheet + PDFs into `./import/` at the repo root — the dev compose bind-mounts it to `/import` automatically.
**Staging/production:**
1. Pre-stage the payload on the host. Convention: `/srv/familienarchiv-staging/import/` or `/srv/familienarchiv-production/import/`.
```bash
rsync -avh --progress ./import/ user@host:/srv/familienarchiv-staging/import/
```
2. Make sure `IMPORT_HOST_DIR=<host-path>` is set in `.env.staging` / `.env.production` (the nightly/release workflows already write this — see §3). Compose refuses to start without it.
3. Redeploy the stack so the bind mount takes effect — or, if the mount is already in place, skip to step 4.
4. Call `POST /api/admin/trigger-import` (requires `ADMIN` permission), or click the "Import starten" button on `/admin/system`.
5. The import runs asynchronously — poll `GET /api/admin/import-status`, watch `/admin/system`, or tail the backend logs.
1. Place the import file in the `import/` bind mount on the backend container.
2. Call `POST /api/admin/trigger-import` (requires `ADMIN` permission).
3. The import runs asynchronously — poll `GET /api/admin/import-status` or watch backend logs.
---

View File

@@ -107,13 +107,6 @@ _See also [Briefwechsel](#briefwechsel-user-facing)._
---
## Infrastructure Terms
**archiv-app** — the bucket-scoped MinIO service account the backend uses to read and write the `familienarchiv` bucket. Distinct from the MinIO root account (`archiv`, used only by the bootstrap container for admin operations). Defined and provisioned in [`infra/minio/bootstrap.sh`](../infra/minio/bootstrap.sh) and consumed by the backend as `S3_ACCESS_KEY` in [`docker-compose.prod.yml`](../docker-compose.prod.yml). The attached `archiv-app-policy` grants `s3:GetObject/PutObject/DeleteObject` on `familienarchiv/*` and `s3:ListBucket/GetBucketLocation` on the bucket only — not the built-in `readwrite` policy which would grant `s3:*` on all buckets.
_See also [ADR-010 — MinIO stays self-hosted, not Hetzner OBS](./adr/010-minio-self-hosted-not-hetzner-obs.md)._
---
## Pending Terms
_Terms flagged as potentially ambiguous that have not yet been formally defined here. Add an entry above and remove it from this list when resolved._

View File

@@ -1,68 +0,0 @@
# ADR-008: SQL-level pagination for full-text search via window-function CTE
## Status
Accepted
## Context
`DocumentRepository.findAllMatchingIdsByFts` (formerly `findRankedIdsByFts`) returns all matching document IDs for a FTS query. `DocumentService.searchDocuments` then paginates in memory on the RELEVANCE sort path.
A pre-production audit against 1,520 documents measured:
```
rows_per_call: 911 / call (query: "walter")
```
At current scale this is acceptable — 911 UUIDs ≈ 14 KB, ms-level DB time. At 100 K+ documents two failure modes emerge:
1. **Memory**: a broad query returns ~60 K UUIDs ≈ 1 MB per request, multiplied by concurrent users.
2. **Latency**: the `LATERAL` join does work proportional to match-set size; at 60 K matches the FTS step alone exceeds 100 ms per query.
Tracked as finding **F-31 (High)** in the pre-production architectural review.
## Decision
Push pagination and rank ordering into SQL for the RELEVANCE sort path when no non-text filters are active (pure full-text search):
```sql
WITH q AS (
SELECT CASE WHEN websearch_to_tsquery('german', :query)::text <> ''
THEN to_tsquery('simple', regexp_replace(
websearch_to_tsquery('german', :query)::text,
'''([^'']+)''', '''\\1'':*', 'g'))
END AS pq
), matches AS (
SELECT d.id, ts_rank(d.search_vector, q.pq) AS rank
FROM documents d, q
WHERE d.search_vector @@ q.pq
)
SELECT id, rank, COUNT(*) OVER () AS total
FROM matches
ORDER BY rank DESC, id
OFFSET :offset LIMIT :limit
```
`COUNT(*) OVER ()` returns the full match count alongside each page row in a single round-trip — no separate count query needed.
`rows_per_call` for the FTS query drops from match-set size (911) to page size (≤ 50).
When non-text filters (date range, sender, receiver, tags, status) are also active, the existing path is preserved: `findAllMatchingIdsByFts` returns all ranked IDs, which are passed as an `IN` clause to the JPA Specification, and `totalElements` comes from the JPA `Page.getTotalElements()`. This keeps the count accurate across the combined filter set.
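One way the decision could surface in Spring Data JPA — a sketch assuming an interface projection and illustrative names (`RankedIdRow`, `findRankedIdPage`); the committed repository method may differ:
```java
import java.util.List;
import java.util.UUID;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;

// One page of rank-ordered IDs; `total` repeats the window-function count
// on every row, so the service reads it once from the first row.
interface RankedIdRow {
    UUID getId();
    float getRank();
    long getTotal();
}

interface DocumentFtsQueries {
    @Query(value = """
            WITH q AS (
              SELECT CASE WHEN websearch_to_tsquery('german', :query)::text <> ''
                     THEN to_tsquery('simple', regexp_replace(
                            websearch_to_tsquery('german', :query)::text,
                            '''([^'']+)''', '''\\1'':*', 'g'))
                     END AS pq
            ), matches AS (
              SELECT d.id, ts_rank(d.search_vector, q.pq) AS rank
              FROM documents d, q
              WHERE d.search_vector @@ q.pq
            )
            SELECT id, rank, COUNT(*) OVER () AS total
            FROM matches
            ORDER BY rank DESC, id
            OFFSET :offset LIMIT :limit
            """, nativeQuery = true)
    List<RankedIdRow> findRankedIdPage(@Param("query") String query,
                                       @Param("offset") int offset,
                                       @Param("limit") int limit);
}
```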
## Alternatives Considered
**1. Two-query approach (separate COUNT + paged SELECT)**
Correct, but doubles round-trips. The window function achieves the same result in one query.
**2. Capped result set with a user-visible warning**
Return at most N results (e.g. 500) and show "showing top 500 of many results". Simpler, but degrades UX for broad queries and doesn't reduce latency proportionally (still scans N rows).
**3. Full SQL rewrite combining FTS + JPA Specification filters**
Possible via a native query that embeds all filter predicates. Eliminates the in-memory SENDER/RECEIVER sort paths and the two-phase approach. High complexity, tight coupling to schema details, loses type-safe JPA Specification composition. Deferred to a future refactor if scale demands it.
## Consequences
- **`rows_per_call` for pure-text FTS searches drops to ≤ page size** — the primary metric.
- **SENDER and RECEIVER sort paths stay in-memory** for combined text+filter queries. For pure-text queries with SENDER/RECEIVER sort, the current approach (fetch all matched IDs, build spec, load all matched entities, sort in-memory) still runs. This is acceptable while the archive stays under ~10 K documents.
- **RELEVANCE sort with text+filters still loads the full filtered entity set in-memory.** The filtered set is typically much smaller than the raw FTS match set, so the cost is bounded by filter selectivity, not total match count.
- **`findAllMatchingIdsByFts` is retained** for: (a) the bulk-edit "select all" fast path (`findIdsForFilter`), (b) the document density chart (`getDensity`), and (c) the SENDER/RECEIVER in-memory sort paths.

View File

@@ -1,50 +0,0 @@
# ADR-009: Standalone `docker-compose.prod.yml`, not an overlay
## Status
Accepted
## Context
The repository's `docker-compose.yml` is a development stack: every service is built locally, ports are exposed on `0.0.0.0` for dev tooling, the frontend runs `npm run dev` with hot-reload, the backend is `spring-boot:run` with the dev profile, and there is no Caddy, no `archiv-app` service account, no admin-credential lock-in, no healthcheck-gated startup sequence. The dev stack reflects "single developer on a laptop", not "production on a single VPS".
The pre-merge design (issue #497, comment #8331) sketched two ways to add a production stack:
1. **Overlay** — keep `docker-compose.yml` as the base, add `docker-compose.prod.yml` as a `-f` overlay (`docker compose -f docker-compose.yml -f docker-compose.prod.yml up`). Compose merges the two files at runtime.
2. **Standalone** — make `docker-compose.prod.yml` a fully self-contained file that does not reference or merge with `docker-compose.yml` at all. Project-name namespacing (`-p archiv-production`, `-p archiv-staging`) keeps multi-environment deploys clean on a single host.
The earlier `docs/infrastructure/production-compose.md` notes assumed overlay because the original plan was to **remove** MinIO in production (replace with Hetzner Object Storage), so the prod file would only need to remove one service and add a few. With MinIO retained (see ADR-010), the prod stack diverges from dev in essentially every service: build vs pre-built image, target stage, port binding, env vars, healthcheck, restart policy, mem_limit, profile gating, service account, depends_on chain. Overlay would mostly be `override:` blocks that nullify the dev defaults — a fragile inversion.
## Decision
`docker-compose.prod.yml` is standalone. Production and staging both run it directly:
```
production: docker compose -f docker-compose.prod.yml -p archiv-production --env-file .env.production ...
staging: docker compose -f docker-compose.prod.yml -p archiv-staging --env-file .env.staging --profile staging ...
```
Environment isolation is achieved via the Docker Compose project name (`-p`). Volumes, networks, and containers are namespaced by the project name, so production and staging cohabit cleanly on the same host without interfering.
The dev `docker-compose.yml` is unchanged — `docker compose up` still works for developers, and its `frontend` service now specifies `target: development` explicitly so the new multi-stage Dockerfile builds the right stage.
## Alternatives Considered
| Alternative | Why rejected |
|---|---|
| Overlay (`-f base.yml -f prod.yml`) | With MinIO retained and most services differing across nearly every field, the overlay would consist mostly of `override:` blocks that null out dev defaults. Compose's merge semantics for nested keys (env, ports, healthcheck) have sharp edges — silently merged port mappings, env-var entries, and depends_on edges cost reviewer hours. Standalone is one file the reader can hold in their head. |
| Two fully separate files (dev + prod) sharing service definitions via `extends:` | `extends:` works across files but is a niche feature and is increasingly discouraged in compose v2. Reviewer load is higher than reading two flat files. |
| Generate prod compose from a template at deploy time (e.g. ytt, kustomize) | Adds a build-time step and a new tool to the operator toolchain. Justified for a fleet of 10+ environments; overkill for production + staging on one host. |
| Single compose file with environment-specific profiles | Compose profiles select which *services* run, not which *configuration* a service runs with. Using profiles to swap "build locally" vs "pull image" would smear dev and prod across one file. |
## Consequences
- The prod file can be read top-to-bottom without cross-referencing `docker-compose.yml`. Onboarding and review cost drops.
- Volume namespacing is automatic (`archiv-production_postgres-data`, `archiv-staging_postgres-data`) — no manual `volumes:` aliasing.
- Dev compose churn (e.g. swapping a dev port) cannot accidentally affect production. The two files are independent.
- The cost is duplication: identical environment variables (e.g. `POSTGRES_DB: archiv`) appear in both files. This duplication is bounded — the service set is small and stable, so it does not grow over time — and the alternative (overlay) carries its own duplication via `override:` boilerplate.
- The retired `docs/infrastructure/production-compose.md` narrative is trimmed to a pointer at the live files. The cost/sizing rationale is preserved there.
## Future Direction
If the deployment fleet ever grows beyond two environments on one host (e.g. add a `demo` environment, or shard staging across two VPS for load testing), revisit the templating decision. At three+ environments the duplication starts to bite and a template engine (kustomize or ytt) becomes attractive.

View File

@@ -1,53 +0,0 @@
# ADR-010: MinIO stays self-hosted on the production VPS
## Status
Accepted
## Context
`docs/infrastructure/production-compose.md` (pre-this-PR) sketched a production topology in which the application bucket migrates from in-cluster MinIO to Hetzner Object Storage (OBS, S3-compatible). The motivation was operational: one less service to back up, no MinIO RAM/disk pressure on the VPS, hand off durability to the hyperscaler.
Two facts revisited at pre-merge review (issue #497, comment #8331) changed the answer:
1. **Current data size is small.** The archive is ~13 GB of file uploads (Kurrent letters, scanned ODS files, attachment PDFs). Hetzner OBS billing on this size is dominated by the per-month base fee (~5 EUR/mo for the smallest unit), not capacity or egress. The break-even point against the VPS's existing disk is far above the current footprint.
2. **MinIO is already production-grade.** The dev stack uses MinIO; the backend already drives it via the AWS SDK v2 with a generic `S3_ENDPOINT`. Switching providers is a runtime env-var change (`S3_ENDPOINT`, `S3_ACCESS_KEY`, `S3_SECRET_KEY`) plus an `mc mirror` to copy objects. There is no application-level rewrite cost waiting.
If Hetzner OBS were a one-way-door (provider-specific SDK, complex IAM integration, multi-month migration), the decision would deserve a serious weighing. Since the migration is this reversible, deferring it costs nothing.
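The reversibility rests on provider-agnostic client wiring — a sketch of the AWS SDK v2 construction, assuming the env-var names from the compose file; the committed config class is not reproduced here:
```java
import java.net.URI;

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

class S3ClientFactory {
    static S3Client fromEnv() {
        return S3Client.builder()
                // The only provider-specific knob: point it at MinIO today,
                // at Hetzner OBS later — an env-var rotation, no code change.
                .endpointOverride(URI.create(System.getenv("S3_ENDPOINT")))
                .region(Region.of(System.getenv("S3_REGION")))
                .credentialsProvider(StaticCredentialsProvider.create(
                        AwsBasicCredentials.create(
                                System.getenv("S3_ACCESS_KEY"),
                                System.getenv("S3_SECRET_KEY"))))
                .forcePathStyle(true) // MinIO addresses buckets path-style
                .build();
    }
}
```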
## Decision
MinIO stays on the production VPS for the first launch. The application bucket is created and managed inside the docker-compose stack (`infra/minio/bootstrap.sh`). The backend uses a least-privilege service account (`archiv-app`) with a bucket-scoped IAM policy, not the MinIO root credentials.
Hetzner Object Storage is **explicitly deferred**, not rejected. The migration path is documented as a runbook in `docs/DEPLOYMENT.md` (when the trigger fires): provision an OBS bucket, run `mc mirror local-minio:/familienarchiv obs:/familienarchiv`, rotate the three env vars, restart the backend, decommission the MinIO service from `docker-compose.prod.yml`.
## Triggers to re-evaluate
Revisit the decision when **any** of the following holds:
- The `minio-data` volume exceeds 50 GB and is growing > 5 GB/month.
- MinIO healthcheck latency exceeds 200 ms p95 (signal of disk pressure on the host).
- The VPS upgrade required to keep MinIO healthy costs more per month than the equivalent OBS bucket + traffic.
- Backup of the MinIO volume to `heim-nas` over Tailscale (deferred follow-up) is implemented and consistently runs > 30 min nightly. At that point durability-as-a-service starts paying for itself.
The migration runbook in `docs/DEPLOYMENT.md` is the script for executing the swap when one of the triggers fires.
## Alternatives Considered
| Alternative | Why rejected (for now) |
|---|---|
| Migrate to Hetzner Object Storage in this PR | Premature. Adds an external dependency, locks the operator into the Hetzner ecosystem before the data has demonstrated it needs hyperscaler durability, blocks the PR on a migration that buys ~5 GB of headroom. |
| Migrate to S3 (AWS) for HA across regions | Way over-spec for a family archive. Egress cost would dwarf any benefit; durability concerns at this size are addressed by nightly off-site backup, not by multi-region replication. |
| Drop S3 abstraction entirely; store files directly on the VPS disk | Possible, but loses the bucket-policy IAM surface (least-privilege service account), loses presigned-URL flow (OCR service downloads files via short-lived URLs, not via shared filesystem), loses the migration path to OBS. The S3 indirection is cheap insurance. |
| Self-hosted on-VPS plus periodic `mc mirror` to Hetzner OBS for off-site backup | This is the **target** for the backup pipeline follow-up. Treated as backup, not primary — primary stays MinIO. |
## Consequences
- The production VPS sizing (Hetzner CX42, 16 GB RAM, 80 GB disk) must accommodate MinIO's working set. Current footprint leaves ample headroom.
- Backup of MinIO data is the operator's responsibility until the off-site `mc mirror` pipeline is implemented (deferred follow-up). The DEPLOYMENT.md rollback procedure explicitly flags this — manual backup is the only recovery option until the pipeline ships.
- The backend never sees the MinIO root password; it uses the `archiv-app` service account with a bucket-scoped IAM policy (see `infra/minio/bootstrap.sh`). A backend RCE/SSRF cannot escalate beyond the `familienarchiv` bucket.
- The migration to Hetzner OBS remains a small, well-understood runbook step rather than a major refactor. No application code, no SDK swap.
## Future Direction
When one of the triggers above fires, the migration is: provision OBS bucket → `mc mirror` → rotate three env vars → restart backend → remove MinIO service from compose. The bucket-scoped policy translates 1:1 to an OBS user policy (S3-compatible).

View File

@@ -1,58 +0,0 @@
# ADR-011: Single-tenant Gitea runner with secrets-on-disk env-files
## Status
Accepted
## Context
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) execute on a self-hosted Gitea Actions runner. The runner has Docker-out-of-Docker access (the host's Docker socket is mounted into the runner), so `docker compose build` produces images on the host daemon and `docker compose up` consumes them directly — no registry hop.
Two workflow steps shape the security model:
1. **"Write env file"** — the workflow writes every required secret to `.env.staging` or `.env.production` on the runner's filesystem so that `docker compose --env-file` can consume them. The file lives on disk for the duration of the workflow.
2. **"Cleanup env file"** — the matching `if: always()` step deletes the env file after the workflow ends, regardless of success.
This shape only works under one operational assumption: **the runner is single-tenant**. The runner is owned by the same operator who owns the secrets, no other repositories run jobs on the same runner, and no untrusted code is executed (no public fork PRs trigger workflows). If any of those guarantees ceased to hold, the env-file-on-disk approach would be a credential exposure path — a sibling job could read `.env.production`, or a malicious PR could exfiltrate the secrets via a step.
The alternative — `docker compose --env-file <(printf "..." )` (bash process substitution) — is technically supported and would keep secrets out of the on-disk filesystem. It is more secure under a multi-tenant runner but requires bash 4+ and is brittle inside YAML (the `printf` step would need to escape every secret value containing newlines, equals signs, or quotes).
## Decision
The runner is treated as single-tenant for the lifetime of the v1 deployment. The workflows write env-files to disk under that assumption and rely on the `if: always()` cleanup step to remove them. The operational assumption is documented in-comment at the top of both workflow files (`nightly.yml`, `release.yml`) so the next operator who considers adding a second repo or accepting public PRs has the trigger surfaced in front of them.
Concretely:
- The Gitea runner only runs jobs for `marcel/familienarchiv`.
- No public fork PRs trigger the workflows (Gitea defaults to requiring an explicit approval on first-time contributor PRs for the actions to run).
- Secrets are stored in Gitea repository secrets and injected via `${{ secrets.* }}`. They land in the env-file at workflow start and are removed at workflow end.
## Migration trigger
Switch to the multi-tenant-safe pattern when **any** of the following becomes true:
- A second repository starts using the same runner.
- A workflow accepts contributions that can run untrusted code (public PRs without manual approval).
- The runner is moved off the operator's controlled host onto shared infrastructure.
The migration path is one-step per workflow: replace the "Write env file" step with `--env-file <(printf '%s' "${{ secrets.STAGING_ENV_BLOB }}")` and store the full env-file as a single Gitea secret. The cleanup step is then unnecessary because the env-file never touches disk.
## Alternatives Considered
| Alternative | Why rejected (for now) |
|---|---|
| `--env-file <(printf "...")` via bash process substitution | More secure under multi-tenant. Brittle for multi-line / quoted secret values; harder to debug ("env file not found" with no diff to inspect). Justified once the trigger above fires. |
| Docker secrets (`docker secret create` + `compose secrets:`) | Designed for Swarm; outside of Swarm, compose secrets read from files anyway, so the on-disk surface is the same. Adds complexity without changing the threat model. |
| External secret manager (Vault, AWS Secrets Manager) | Adds a third-party dependency to the deploy path. For a family-archive deployment with one operator and one VPS, the cost outweighs the benefit at this scale. |
| GitHub-hosted ephemeral runners | Would require uploading the prod-deploy artifacts to a registry first, then a deploy step on the VPS connecting back. Inverts the current Docker-out-of-Docker simplicity for marginal security gain. The single-tenant self-hosted runner *is* ephemeral in practice — the secrets are written to a directory the runner controls, then deleted. |
## Consequences
- The runner host's filesystem is in the secret-trust boundary. The host is hardened per `docs/DEPLOYMENT.md` (ufw, fail2ban, Tailscale-only SSH).
- An operator who later adds a second repo to the runner without revisiting the workflows would silently break the trust assumption. The in-file comments at the top of `nightly.yml` and `release.yml` are the breadcrumb that surfaces the assumption at change time.
- The `if: always()` cleanup step is load-bearing: removing it (e.g. during a future workflow refactor) leaves credentials on disk between runs. Treat it as a permanent invariant.
- Workflow debuggability stays high: an operator who needs to know what env-file the deploy ran with can SSH onto the host while a workflow is in flight and `cat .env.staging` — useful for first-deploy diagnostics.
## Future Direction
When the trigger fires, migrate both workflows in a single PR: replace the "Write env file" step with a single `--env-file <(printf '%s' …)` invocation, drop the cleanup step, and consolidate the per-secret Gitea entries into a single multi-line `STAGING_ENV_BLOB` / `PROD_ENV_BLOB` secret. Single commit, both workflows, no application change.

View File

@@ -1,134 +0,0 @@
# ADR 012 — Browser-Mode Test Mocking Strategy
**Status:** Accepted
**Date:** 2026-05-11 (revised 2026-05-12)
**Issues:** [#535 — original incident](https://git.raddatz.cloud/marcel/familienarchiv/issues/535) · [#553 — revision](https://git.raddatz.cloud/marcel/familienarchiv/issues/553)
---
## Context
Vitest browser-mode tests (the `client` project, run with `@vitest/browser-playwright` / Chromium) use a different module resolution path than Node-environment tests. When a spec calls `vi.mock('some-module', factory)`, vitest registers a `ManualMockedModule`. At runtime, every time Chromium requests that module, a playwright route handler intercepts the request and calls the Node worker over **birpc** (`resolveManualMock`) to evaluate the factory and return the module body.
This is safe for modules that are imported **statically** at spec module-eval time (e.g. `$app/navigation`, `$env/static/public`): those requests resolve before the first test runs and well before any teardown occurs.
It is **unsafe** for modules that are imported **dynamically** (e.g. inside an `async onMount`, inside a lazy-loaded chunk): Chromium may fetch the module after the worker's birpc channel has already closed, producing:
```
Error: [birpc] rpc is closed, cannot call "resolveManualMock"
ManualMockedModule.factory node_modules/@vitest/browser/dist/index.js:3221:34
```
This raises an unhandled rejection that exits the vitest process with code 1, even though every test in the run reported green.
`pdfjs-dist` and `pdfjs-dist/build/pdf.worker.min.mjs?url` are loaded via `await Promise.all([import('pdfjs-dist'), import('pdfjs-dist/build/pdf.worker.min.mjs?url')])` inside `usePdfRenderer.svelte.ts::init()`, which is called from `onMount`. These dynamic imports triggered the race.
---
## Decision
**Prefer prop injection over `vi.mock(module, factory)` for any module that is loaded dynamically in browser-mode specs.**
### The libLoader pattern (for external rendering libraries)
When a component depends on a large external library loaded via dynamic import, extract the import into an injectable loader function with a production default:
```typescript
// usePdfRenderer.svelte.ts
type LibLoader = () => Promise<readonly [typeof import('pdfjs-dist'), { default: string }]>;
const defaultLibLoader: LibLoader = () =>
Promise.all([import('pdfjs-dist'), import('pdfjs-dist/build/pdf.worker.min.mjs?url')]);
export function createPdfRenderer(libLoader: LibLoader = defaultLibLoader) { ... }
```
The component threads the loader as an optional prop:
```svelte
<!-- PdfViewer.svelte -->
let { url, ..., libLoader = undefined } = $props();
const renderer = untrack(() => createPdfRenderer(libLoader));
```
Tests supply a synchronous fake — no `vi.mock` needed:
```typescript
const fakePdfjs = { GlobalWorkerOptions: ..., getDocument: vi.fn(), TextLayer: class {} };
const fakeLoader = vi.fn().mockResolvedValue([fakePdfjs, { default: '' }] as const);
render(PdfViewer, { url: '...', libLoader: fakeLoader });
```
### The test-host pattern (for component behaviour)
For components that fetch data or call services, the `*.test-host.svelte` pattern threads the dependency as a prop rather than mocking the module. See `PersonMentionEditor.test-host.svelte` for the canonical example.
---
## Binding invariant: factory bodies must be synchronous (#553)
The original revision of this ADR allowed `vi.mock(virtualModule, factory)` for SvelteKit/Vite virtual modules on the argument that their consumer imports were resolved at static-import time. **That reasoning is wrong.** What matters is what the **factory body** does, not where the mocked module is consumed.
`EnrichmentBlock.svelte.spec.ts` (issue #553) was statically imported and still produced the race: its `vi.mock('$app/stores', async () => { const mod = await import(...); return mod; })` factory performed a dynamic import in its body, and that body was invoked asynchronously when Chromium fetched the manually-mocked module — sometimes after the worker's birpc channel had already closed.
**Therefore: under `**/*.svelte.{test,spec}.ts`, every `vi.mock` factory body must be synchronous. No `await`, no `import(...)`.**
If a factory needs to share state with the spec (a mutable ref, a `vi.fn`, a writable store), use `vi.hoisted()` to lift the reference above `vi.mock`'s implicit hoist:
```ts
const { mockNavigating } = vi.hoisted(() => ({
mockNavigating: { type: null as string | null }
}));
vi.mock('$app/state', () => ({
get navigating() {
return mockNavigating;
}
}));
```
The getter defers the read until consumption time; `vi.hoisted` guarantees the reference is initialised before the (also hoisted) `vi.mock` factory runs. See `DropZone.svelte.spec.ts:9`, `NotificationBell.svelte.spec.ts:6-10`, and `EnrichmentBlock.svelte.spec.ts` for canonical examples.
### Architectural follow-on: prefer `$app/state` over `$app/stores`
`$app/stores` is the deprecated subscription-based store API; `$app/state` is the modern reactive proxy. New components should import from `$app/state`. As part of #553 we migrated `EnrichmentBlock.svelte` from `$app/stores.navigating` to `$app/state.navigating` with `!!navigating.type` — matching the pattern already established in `routes/aktivitaeten/+page.svelte:117` and `routes/documents/+page.svelte:261`. Migration eliminated the *need* to mock a store at all in that spec.
**Pattern note:** When an overlay or dropdown triggers a navigation action, use `<button type="button">` with an `onclick` handler that calls `goto(path)` — do **not** use `<a href="…">` with `e.preventDefault()`. SvelteKit registers its link interceptor as a capture-phase `document` listener, so it fires before the component's bubble-phase `onclick`. By the time `e.preventDefault()` runs the router has already initiated navigation, which tears down the vitest-browser Playwright orchestrator iframe. A `<button>` carries no `href`, so the capture-phase interceptor never fires. See `NotificationDropdown.svelte` for the canonical example.
**Pattern note (#553):** Browser-mode tests run with `data-sveltekit-preload-data="off"` (set in `src/test-setup.ts` via the client project's `setupFiles`). Hover-prefetch otherwise fires real fetch requests for route loader chunks; those requests go through the same Playwright route handler that serves mocked modules. An in-flight prefetch landing after iframe teardown can hit the handler with a closed birpc channel, raising an unhandled rejection.
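The relevant lines of the setup file are roughly the following (sketch; the committed `src/test-setup.ts` may carry more):
```ts
// src/test-setup.ts: runs via the client project's setupFiles before any spec renders.
// The meta-test in src/__meta__/ asserts both attributes are present.
document.body.dataset.sveltekitPreloadData = 'off';
document.body.dataset.sveltekitPreloadCode = 'off';
```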
---
## Binding invariant: one canonical ID per mocked module (#553 — duplicate-id hazard)
The sync-factory invariant above closes one named trigger of the `[birpc] rpc is closed` race. Investigation of a follow-up flake revealed a second, independent trigger: **the same resolved module URL mocked under two distinct ID strings** across or within spec files.
`@vitest/browser-playwright` registers a Playwright `page.context().route(...)` handler per `vi.mock` call. The predicate matches on the module's resolved URL. When two `vi.mock` calls reference the same module under different IDs — for example `'$lib/foo.svelte'` and `'$lib/foo.svelte.js'` (both resolve to the same Svelte rune-module URL) — the registry stores both predicates but the cleanup map only tracks the latest. The orphan route survives session teardown. When the next session loads the same module, the orphan fires, calls `await module.resolve()` against a closed birpc channel, and crashes the run.
This is fixed upstream in [vitest PR #10267](https://github.com/vitest-dev/vitest/pull/10267) (issue [#9957](https://github.com/vitest-dev/vitest/issues/9957)). Until that fix reaches a published `@vitest/browser-playwright` release, we close the gap from two sides:
**The rule.** Every mocked module must be referenced under exactly one ID string across the entire client test suite. Pick the spelling production code uses. For Svelte 5 rune modules (`*.svelte.ts`), the canonical form is the no-extension import (`'$lib/foo.svelte'`), which matches the source file basename and follows Svelte 5 convention. Never mix `.svelte.js` and `.svelte` for the same module across specs.
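Concretely (`$lib/foo.svelte` is the same illustrative module as above; the canonicalisation regex is a sketch of what the meta-test does):
```ts
import { vi } from 'vitest';

// ✅ canonical: matches the source basename; use this spelling everywhere
vi.mock('$lib/foo.svelte', () => ({ /* synchronous factory */ }));

// ❌ banned in any other spec: same resolved URL under a second ID string
// vi.mock('$lib/foo.svelte.js', () => ({ /* ... */ }));

// Canonicalisation roughly as the meta-test applies it:
const canonical = (id: string) => id.replace(/(\.svelte)\.(js|ts)$/, '$1');
```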
**Enforcement layers** (added in #553's second cycle, extending the four-layer chain above):
5. **In-suite meta-test** at `frontend/src/__meta__/no-duplicate-mock-ids.test.ts` globs `src/**/*.svelte.{test,spec}.ts`, extracts every `vi.mock` first-arg string, canonicalises by stripping a trailing `.js`/`.ts` after `.svelte`, and fails if any canonical ID is referenced under two or more distinct spellings. Same shape as `no-async-mock-factories.test.ts`.
6. **`patch-package` backport** of PR #10267 at `frontend/patches/@vitest+browser-playwright+4.1.0.patch`. Applied automatically by the `postinstall` hook. Closes the race at the route-handler level — even if a contributor reintroduces a duplicate-ID, the patched `register` handler unroutes the existing predicate before installing the new one.
**When to remove the patch.** Once `@vitest/browser-playwright` ships a release containing PR #10267, delete `patches/@vitest+browser-playwright+4.1.0.patch`. Bump the dependency to the version containing the fix. The in-suite meta-test stays — it's a cheap permanent guard against the contributor-facing pattern, independent of upstream library version.
---
## Consequences
- New browser-mode specs that need to stub an external library **must not** use `vi.mock(externalLib, factory)`. Add a loader/factory parameter to the underlying hook or service instead.
- The CI `unit-tests` job includes a permanent grep guard that fails the build if `rpc is closed` appears in any coverage run log. This catches regressions before they reach the acceptance criterion.
- Acceptance criterion for #535: 60 consecutive green `workflow_dispatch` CI runs against `main` after the fix is merged, with zero `rpc is closed` lines in any log.
- **Enforcement (six layers, defence in depth):**
1. **ESLint `no-restricted-syntax`** in `eslint.config.js` (scoped to `**/*.{spec,test}.ts`) flags two patterns: (a) the literal `vi.mock('pdfjs-dist', ...)` — enforces the libLoader pattern — and (b) any `vi.mock(..., async () => { ... await import(...) ... })` — enforces the synchronous-factory invariant. Both messages point at this ADR. Failure surfaces at save time.
2. **CI grep guard** in `.gitea/workflows/ci.yml` runs before the test suite launches. Mirrors the ESLint patterns with `grep -Pzn`. ~10s round-trip.
3. **In-suite meta-test** at `frontend/src/__meta__/no-async-mock-factories.test.ts` globs `src/**/*.svelte.{test,spec}.ts` and asserts none match the banned pattern. Catches at every vitest invocation — the layer hardest to disable.
4. **CI birpc assert** runs after the coverage step and fails the build if `[birpc] rpc is closed` appears in any log line. Catches the symptom even if all the upstream layers were bypassed.
5. **In-suite duplicate-ID meta-test** at `frontend/src/__meta__/no-duplicate-mock-ids.test.ts` enforces the one-canonical-ID-per-module rule from the duplicate-id-hazard section above.
6. **`patch-package` backport** at `frontend/patches/@vitest+browser-playwright+4.1.0.patch` closes the upstream race itself, applied via `postinstall`. To be removed when `@vitest/browser-playwright` releases [vitest PR #10267](https://github.com/vitest-dev/vitest/pull/10267).
- **Acceptance verification:** `coverage-flake-probe.yml` is a `workflow_dispatch`-triggered matrix workflow that runs the coverage suite 20× in parallel against a single SHA and asserts zero birpc lines. One dispatch, parallel wall-clock cost, deterministic signal — it replaces accumulating 20 sequential push events.
- **When to revisit the LibLoader home:** If three or more components adopt this pattern, consider extracting a shared `$lib/types/lib-loader.ts` or a generic `DynamicImportLoader<T>` type to avoid parallel type definitions across modules.
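A rough shape for that extraction, should it happen (the path and the tuple type are illustrative):
```ts
// $lib/types/lib-loader.ts: hypothetical shared home for the loader type
export type DynamicImportLoader<T> = () => Promise<T>;

// The pdf.js loader would then be one instance of it, e.g.:
// type PdfLibLoader = DynamicImportLoader<readonly [PdfjsModule, { default: string }]>;
```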

View File

@@ -1,63 +0,0 @@
# ADR-012: nsenter via privileged sibling container for host service management in CI
## Status
Accepted
## Context
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) run job steps inside Docker containers under a Docker-out-of-Docker (DooD) setup: the Gitea runner container mounts the host Docker socket, and act_runner spawns a sibling container for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`).
This architecture has one significant limitation: **job containers cannot manage host services**. Specifically:
- Job containers are not in the host's PID, mount, UTS, network, or IPC namespaces.
- There is no systemd PID 1 inside a job container — `systemctl` has nothing to talk to.
- `sudo` is not present in standard container images; even if it were, it would not help.
- Caddy runs as a **host systemd service** (not a Docker container), managing TLS certificates via Let's Encrypt. It must be running on the host to serve port 443.
The deploy workflows need to tell Caddy to reload its config after each deploy so that committed Caddyfile changes are applied before the smoke test validates the public surface. Without a reload step, Caddy silently serves the previous config and the smoke test may pass against stale configuration.
## Decision
Use the host Docker socket (already mounted in every job container via `runner-config.yaml`) to spin up a **privileged sibling container** in the host PID namespace, then use `nsenter` to enter all host namespaces and call `systemctl reload caddy`:
```yaml
- name: Reload Caddy
run: |
docker run --rm --privileged --pid=host \
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
```
`nsenter -t 1 -m -u -n -p -i` enters the init process's mount, UTS, network, PID, and IPC namespaces, giving `systemctl` a view of the real host systemd daemon.
**Alpine is used** instead of Ubuntu: ~5 MB vs ~70 MB pull size, no unnecessary tooling. `util-linux` (which ships `nsenter`) is installed at run time; apk add takes ~1 s on the warm VPS cache. The image digest is pinned so any upstream change requires an explicit Renovate bump PR.
**`reload` not `restart`**: reload sends SIGHUP so Caddy re-reads its config in-process without dropping TLS connections or in-flight requests.
**No sudoers entry is required**: the Docker socket already grants root-equivalent host access. This pattern makes existing implicit privileges explicit rather than introducing new ones.
This decision applies the same pattern to both `nightly.yml` and `release.yml` since both deploy the app stack and must apply Caddyfile changes before smoke-testing the public surface.
## Alternatives Considered
| Alternative | Why rejected |
|---|---|
| `sudo systemctl reload caddy` in the job container | No systemd PID 1 inside the container — `systemctl` has nothing to connect to. `sudo` is not present in container images and would not help even if it were. |
| Caddy admin API (`curl localhost:2019/load`) | Job containers do not share the host network namespace; `localhost:2019` on the host is unreachable. Exposing `:2019` on a host-bound port would add a network attack surface with no benefit over the current approach. |
| SSH from the job container to the VPS host | Requires storing an SSH private key as a CI secret, managing authorized_keys on the host, and opening an inbound SSH path from the container. Adds key management overhead for a pattern that the Docker socket already enables more directly. |
| Running Caddy as a Docker container (instead of host service) | Caddy manages TLS certificates via Let's Encrypt; running it in Docker complicates certificate persistence and renewal. As a host service, cert storage is straightforward and restarts do not risk rate-limit issues. This would be a larger infrastructure change unrelated to the CI gap. |
## Consequences
- The runner host's Docker socket access is now a capability relied upon for host service management, not just for running `docker compose` commands. This is stated explicitly in the YAML comment so future reviewers understand the trust boundary.
- The Caddyfile symlink on the VPS (`/etc/caddy/Caddyfile → /opt/familienarchiv/infra/caddy/Caddyfile`) is a required contract for CI to succeed. It is documented in `docs/DEPLOYMENT.md §3.1` and `docs/infrastructure/ci-gitea.md`. If the symlink is absent or mis-pointed, `systemctl reload caddy` succeeds but Caddy serves stale config.
- Renovate will create bump PRs when a new Alpine 3.21 digest is published. Because the container runs `--privileged --pid=host`, these bump PRs must be reviewed manually and must not be auto-merged. A `packageRule` in `renovate.json` enforces this.
- The step is duplicated between `nightly.yml` and `release.yml` (tracked in issue #539 for extraction into a composite action).
- If Caddy is not running when the step executes, `systemctl reload` exits non-zero and the workflow aborts before the smoke test — preventing a misleading "port 443 refused" curl error.
## References
- `docs/infrastructure/ci-gitea.md` §"Running host-level commands from CI (nsenter pattern)" — full operational context, troubleshooting guide
- `docs/DEPLOYMENT.md` §3.1 — Caddyfile symlink bootstrap step
- ADR-011 — single-tenant runner trust model (Docker socket access scope)

View File

@@ -1,92 +0,0 @@
# ADR 013 — Client-Project Branch Coverage Threshold
**Status:** Accepted
**Date:** 2026-05-14
**Issues:** [#556 — threshold drop](https://git.raddatz.cloud/marcel/familienarchiv/issues/556) · [#496 — long-tail-grind tracking](https://git.raddatz.cloud/marcel/familienarchiv/issues/496)
---
## Context
The browser-mode component test suite (`vitest.client-coverage.config.ts`) enforces Istanbul coverage thresholds across `lines`, `functions`, `branches`, and `statements`. The `branches` metric was set to 80%, but the codebase sits at **75%** — below the gate — causing every CI run of `unit-tests` and `coverage-flake-probe` to fail on this check alone, even when all tests are green.
**Measured baseline (2026-05-14, branch `feat/issue-553-birpc-async-mock-factory`, head `2e6cc346`):**
```
branches: 75% (below the 80% gate — reason for this ADR)
lines: ≥ 80%
functions: ≥ 80%
statements: ≥ 80%
```
Reproducer:
```bash
cd frontend && npm ci && npx vitest run -c vitest.client-coverage.config.ts --coverage
```
### The long-tail-grind problem
In Istanbul's branch accounting, when a child component gains test coverage its branches are added to the parent's denominator. A child moving from 40% → 80% coverage can drag a parent from 78% → 72% because more branches in the call graph become reachable and must be covered. This is not a bug — it is how branch accounting works — but it means that on a large SvelteKit application the denominator grows with every coverage improvement, making an arbitrary 80% ceiling a constant grind. Per #496, the expected cost to reach 80% branches from 75% is 30–100+ commits with no guarantee of stability.
### Why this layer is different
The 80% branch floor used for backend unit/integration tests is appropriate for Java service code and permission logic. Browser-mode component coverage measures Svelte template branches: conditional class bindings, `{#if}` blocks, empty/loaded/error state guards. These branches have a fundamentally different accounting model and a higher inherent denominator. This ADR **only** lowers the browser-mode component gate; the backend test coverage gates are unaffected.
### Security-relevant uncovered components
The following auth/permission-boundary components currently have low or zero branch coverage. When ratchet-up work begins (see below), these are the highest-priority targets:
- `src/routes/login/+page.svelte`
- `src/routes/forgot-password/+page.svelte`
- `src/routes/reset-password/+page.svelte`
- `src/routes/register/+page.svelte`
Note: the 75% figure already reflects the absence of coverage on these files. Lowering the gate does not create this gap — it makes the existing state legible.
---
## Decision
Drop the `branches` threshold from `80` to `75` in `frontend/vitest.client-coverage.config.ts`. Leave `lines`, `functions`, and `statements` at `80`.
The 75% figure matches the measured current state, allowing CI to pass while deliberate coverage improvement work (tracked in #496) continues without blocking other PRs. The asymmetry in the thresholds block is intentional and documented with an inline comment pointing here.
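A sketch of the resulting thresholds block (the committed config carries more settings than shown; shape, not letter):
```ts
import { defineConfig } from 'vitest/config';

// vitest.client-coverage.config.ts: thresholds block (sketch)
export default defineConfig({
	test: {
		coverage: {
			provider: 'istanbul',
			thresholds: {
				lines: 80,
				functions: 80,
				statements: 80,
				// Asymmetry is intentional: 75 matches the measured baseline.
				// Up-only ratchet per this ADR; do not lower without a superseding ADR.
				branches: 75
			}
		}
	}
});
```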
---
## Ratchet Rule
The branches threshold ratchets **up by 3 percentage points** when the rolling 3-PR-average client-project branches figure on `main` stays at or above `threshold + 3pp` for ≥ 30 consecutive days. Direction is **up-only** — never lower the floor below 75 without a new ADR superseding this one. Manual today (verify before any `vitest.client-coverage.config.ts` edit); a future automation issue may codify the check.
Concretely:
- When `main` sustains ≥ 78% branches across 3 consecutive PRs for 30 days → raise gate to 78%
- When `main` sustains ≥ 81% branches across 3 consecutive PRs for 30 days → raise gate back to 80%
---
## Non-goals
- **Not** raising actual branch coverage — that is #496's job, tracked separately.
- **Not** touching the server-project coverage configuration (`vitest.config.ts`) — only the client project hits the long-tail-grind pattern.
- **Not** removing or relaxing any existing test files, `skipIf` guards, or axe-playwright accessibility runs.
---
## Consequences
**Easier:**
- CI unblocked — `unit-tests` and `coverage-flake-probe` jobs pass when all tests are green
- The ratchet rule creates a concrete, observable path back to 80%
**Harder:**
- The gate now has near-zero headroom — any branch regression that drops below 75% will fail CI immediately
- The 75% floor must not be treated as a permanent ceiling; the ratchet discipline requires active attention
---
## References
- [#496 — Branch coverage long-tail grind](https://git.raddatz.cloud/marcel/familienarchiv/issues/496)
- [#556 — This threshold drop](https://git.raddatz.cloud/marcel/familienarchiv/issues/556)
- [ADR 012 — Browser-Mode Test Mocking Strategy](./012-browser-test-mocking-strategy.md)
- `frontend/vitest.client-coverage.config.ts` — thresholds block (lines 44–51)

View File

@@ -1,122 +0,0 @@
# ADR 014 — Pin actions/upload-artifact to v3 (Gitea act_runner v4 protocol incompatibility)
**Status:** Accepted
**Date:** 2026-05-14
**Issues:** [#557 — re-regression](https://git.raddatz.cloud/marcel/familienarchiv/issues/557) · [#14 — original incident](https://git.raddatz.cloud/marcel/familienarchiv/issues/14)
---
## Context
`actions/upload-artifact` is available in two incompatible major versions. The v4 client
uploads via a GitHub-specific artifact API that is **not implemented** in Gitea's
`act_runner` (the self-hosted CI substrate established by ADR-011). When a workflow step
uses `actions/upload-artifact@v4` on this runner, `act_runner` returns a non-zero exit
code from the v4 client even when all tests pass, producing:
> green test suite — red job status — no artifact uploaded
The failure lands in the upload step, _after_ the test output, making it hard to diagnose
from the build log.
### Incident history
| Date | Commit | Event |
|---|---|---|
| 2026-03-19 | `9f3f022e` | Original downgrade: `upload-artifact@v4 → v3` |
| 2026-03-19 | `4142c7cd` | Rationale committed; closes #14 |
| 2026-05-05 | `410b91e2` | Re-regression: upgraded back to v4 without referencing #14 |
| 2026-05-14 | this PR | Second downgrade + ADR + grep guard |
The root cause of the re-regression was institutional-memory failure: the original
rationale was captured only in a commit body, invisible at the point of change (the
`uses:` line). This ADR, the inline comments, and the grep guard are the three
defence layers that replace that missing breadcrumb.
---
## Decision
**Pin all `actions/upload-artifact` and `actions/download-artifact` call sites to `@v3`.**
Both action families share the same v4 protocol incompatibility with `act_runner`.
Pinning to the major tag (`@v3`) keeps us on the latest v3 patch without Renovate noise.
Three call sites are pinned:
- `.gitea/workflows/ci.yml` — "Upload coverage reports" step
- `.gitea/workflows/ci.yml` — "Upload screenshots" step
- `.gitea/workflows/coverage-flake-probe.yml` — "Upload coverage log on failure" step
Each pinned `uses:` line carries a load-bearing inline comment:
```yaml
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- uses: actions/upload-artifact@v3
```
A CI grep guard enforces the constraint automatically (see below).
---
## Consequences
### Enforcement layers (defence in depth)
1. **Inline comments** on every `uses:` line — visible at the point of change.
2. **CI grep guard** in `.gitea/workflows/ci.yml` ("Assert no (upload|download)-artifact
past v3") — fails the build if a future commit re-introduces `@v4` or higher on any
workflow file. Anchored to YAML `uses:` lines to avoid false positives on embedded
shell strings. Includes a self-test that proves the regex catches v4+ before scanning
the repo.
3. **This ADR** — canonical rationale; cross-referenced by comments and guard message.
### How to spot the symptom
- Test suite output shows green (vitest, surefire, pytest all exit 0)
- CI job status shows red
- Artifacts section of the run is empty
- Build log shows a non-zero exit from the `Upload …` step immediately after green tests
### `@v3` maintenance-mode status
GitHub placed `actions/upload-artifact@v3` in maintenance mode (no new features) but it
has not been removed and carries no known unpatched CVE as of this writing. If GitHub
publishes a v3-specific security advisory, that is an additional trigger to re-evaluate
(see upgrade conditions below).
### When to remove this pin
Re-evaluate pinning **when either condition is met:**
1. `gitea/act_runner` ships a release with v4 artifact protocol support. Track upstream:
<https://gitea.com/gitea/act_runner>
2. `actions/upload-artifact@v3` acquires an unpatched CVE that cannot be mitigated
at the runner level.
When upgrading: remove the grep guard step, update all three `uses:` lines, remove the
inline comments, and update this ADR's status to Superseded.
---
## Alternatives
### SHA pinning (`uses: actions/upload-artifact@<sha>`)
More secure against action repository compromise, but adds Renovate update friction
and is disproportionate for a self-hosted, single-tenant Gitea instance with one
trusted contributor (ADR-011). Rejected.
### Minor/patch pinning (`@v3.4.0`)
Avoids Renovate PRs but freezes us on a specific patch. The v3 major track is in
maintenance mode — minor pinning has no benefit and would require manual updates
for any v3 security patches. Rejected.
### Renovate `packageRules` bypass
Would prevent automated PRs from proposing v4. Not needed while Renovate is not
configured for this repository. Revisit if Renovate is introduced.
### Migrating the runner to a v4-compatible Gitea release
Out of scope for this issue. A separate decision; tracked in #557's non-goals.

View File

@@ -6,27 +6,23 @@ title Container Diagram: Familienarchiv
Person(user, "User", "Admin or family member")
System_Ext(mail, "Email Service", "SMTP server. Delivers notification and password-reset emails.")
Container(caddy, "Reverse Proxy", "Caddy 2 (host-installed)", "TLS termination (auto Let's Encrypt). Routes /api/* to backend:8080, everything else to frontend:3000. Responds 404 on /actuator/* and adds HSTS, X-Content-Type-Options, Referrer-Policy headers.")
System_Boundary(archiv, "Familienarchiv (Docker Compose)") {
Container(frontend, "Web Frontend", "SvelteKit / Node adapter / port 3000", "Server-side rendered UI. Handles auth session cookies, document search and viewer, transcription editor, annotation layer, family tree (Stammbaum), stories (Geschichten), activity feed (Chronik), enrichment workflow, and admin panel.")
Container(backend, "API Backend", "Spring Boot 4 / Java 21 / Jetty / port 8080", "REST API. Implements document management, search, user auth, file upload/download, transcription, OCR orchestration, and SSE notifications. Trusts X-Forwarded-* headers from Caddy.")
Container(frontend, "Web Frontend", "SvelteKit / Node.js", "Server-side rendered UI. Handles auth session cookies, document search and viewer, transcription editor, annotation layer, family tree (Stammbaum), stories (Geschichten), activity feed (Chronik), enrichment workflow, and admin panel.")
Container(backend, "API Backend", "Spring Boot 4 / Java 21 / Jetty", "REST API. Implements document management, search, user auth, file upload/download, transcription, OCR orchestration, and SSE notifications.")
Container(ocr, "OCR Service", "Python FastAPI / port 8000", "Handwritten text recognition (HTR) and OCR microservice. Single-node by design — see ADR-001. Reachable only on the internal Docker network; no external port exposed.")
ContainerDb(db, "Relational Database", "PostgreSQL 16", "Stores document metadata, persons, users, permission groups, tags, transcription blocks, audit log, and Spring Session data.")
ContainerDb(storage, "Object Storage", "MinIO (S3-compatible)", "Stores the actual document files (PDFs, scans). Backend uses a bucket-scoped service account (archiv-app), not MinIO root.")
Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.")
ContainerDb(storage, "Object Storage", "MinIO (S3-compatible)", "Stores the actual document files (PDFs, scans). Objects keyed as documents/{UUID}_{filename}.")
Container(mc, "Bucket Init Helper", "MinIO Client (mc)", "One-shot container on startup. Creates the archive bucket with private access policy.")
}
Rel(user, caddy, "HTTPS", "TLS 1.2/1.3")
Rel(caddy, frontend, "Reverse proxies non-/api requests", "HTTP / loopback:3000")
Rel(caddy, backend, "Reverse proxies /api/*", "HTTP / loopback:8080")
Rel(user, frontend, "Uses", "HTTPS / Browser")
Rel(frontend, backend, "API requests with Basic Auth token", "HTTP / REST / JSON")
Rel(backend, user, "SSE notifications (server-sent events)", "HTTP / SSE — fronted by Caddy")
Rel(backend, user, "SSE notifications (server-sent events)", "HTTP / SSE — direct backend-to-browser")
Rel(backend, db, "Reads and writes metadata and sessions", "JDBC / SQL")
Rel(backend, storage, "Uploads and streams document files using archiv-app service account", "HTTP / S3 API (AWS SDK v2)")
Rel(backend, storage, "Uploads and streams document files", "HTTP / S3 API (AWS SDK v2)")
Rel(backend, ocr, "OCR job requests with presigned MinIO URL", "HTTP / REST / JSON")
Rel(backend, mail, "Sends notification and password-reset emails (optional)", "SMTP")
Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI")
Rel(mc, storage, "Creates bucket on startup", "MinIO Client CLI")
@enduml

View File

@@ -1,49 +1,26 @@
@startuml
title Authentication Flow (behind Caddy reverse proxy)
title Authentication Flow
actor User
participant Browser
participant "Caddy (TLS termination)" as Caddy
participant "Frontend (SvelteKit)" as Frontend
participant "Backend (Spring Boot)" as Backend
participant PostgreSQL as DB
User -> Browser: Enter email + password
Browser -> Caddy: HTTPS POST /login (form action)
note right of Caddy
Caddy terminates TLS and forwards
to Frontend over HTTP with:
X-Forwarded-Proto: https
X-Forwarded-For: <client IP>
X-Forwarded-Host: archiv.raddatz.cloud
end note
Caddy -> Frontend: HTTP POST /login\n+ X-Forwarded-Proto: https
Browser -> Frontend: POST /login (form action)
Frontend -> Frontend: Base64 encode "email:password"
Frontend -> Backend: GET /api/users/me\nAuthorization: Basic <token>\n+ X-Forwarded-Proto: https
note right of Backend
server.forward-headers-strategy: native
Jetty's ForwardedRequestCustomizer
reads X-Forwarded-Proto so
request.getScheme() returns "https".
end note
Frontend -> Backend: GET /api/users/me\nAuthorization: Basic <token>
Backend -> Backend: Spring Security parses Basic Auth
Backend -> DB: SELECT user WHERE email=?
DB --> Backend: AppUser + groups + permissions
Backend -> Backend: BCrypt.matches(password, hash)
Backend --> Frontend: 200 OK — UserDTO
Frontend -> Caddy: Set-Cookie: auth_token=<base64>\n(httpOnly, **Secure**, SameSite=strict, maxAge=86400)
note right of Frontend
Secure flag is set because the
request scheme observed by the
app is https (forwarded by Caddy).
end note
Caddy -> Browser: HTTPS 200 + Set-Cookie
Browser -> Caddy: HTTPS GET / (next request)
Caddy -> Frontend: HTTP GET / + X-Forwarded-Proto: https
Frontend -> Browser: Set-Cookie: auth_token=<base64>\n(httpOnly, SameSite=strict, maxAge=86400)
Browser -> Frontend: GET / (next request)
Frontend -> Frontend: hooks.server.ts reads auth_token cookie
Frontend -> Backend: GET /api/users/me\nAuthorization: Basic <token>
Backend --> Frontend: 200 OK — user in event.locals
Frontend --> Caddy: rendered page
Caddy --> Browser: HTTPS 200
Frontend --> Browser: Render page with user context
@enduml

View File

@@ -4,109 +4,16 @@ This document covers the Gitea Actions CI workflow for Familienarchiv, including
---
## Runner Architecture
## Self-Hosted Runner Provisioning
Familienarchiv uses **two runners** on the same Hetzner VPS:
| Runner | Purpose | Config |
|---|---|---|
| `gitea` (Docker container) | Hosts Gitea itself | `infra/gitea/docker-compose.yml` |
| `gitea-runner` (Docker container) | Runs all CI and deploy jobs | `infra/gitea/docker-compose.yml` + `/root/docker/gitea/runner-config.yaml` |
Both containers live in the `gitea_gitea` Docker network on the VPS. The runner connects to Gitea via the LAN IP so job containers (which don't share the `gitea_gitea` network) can also reach it.
### Docker-out-of-Docker (DooD)
The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps.
### Running host-level commands from CI (nsenter pattern)
Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace:
```yaml
- name: Reload Caddy
run: |
docker run --rm --privileged --pid=host \
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
```
`nsenter -t 1 -m -u -n -p -i` enters the init process's mount, UTS, network, PID, and IPC namespaces, giving `systemctl` a view of the real host systemd. No sudoers entry is required — the Docker socket already grants root-equivalent host access.
Alpine is used instead of Ubuntu: ~5 MB vs ~70 MB, and the digest is pinned to a specific sha256 so any upstream change requires an explicit Renovate bump PR. `util-linux` (which ships `nsenter`) is not part of the Alpine base image but is installed at run time in ~1 s from the warm VPS cache.
#### Why not `sudo systemctl` in the job container?
Job containers run as root inside an unprivileged Docker namespace. There is no systemd PID 1 inside the container — `systemctl` would attempt to reach a socket that does not exist. `sudo` is not present in container images and would not help even if it were.
#### Why not Caddy's admin API?
Caddy ships a localhost admin API at `:2019` by default. Job containers do not share the host network namespace, so they cannot reach `localhost:2019` on the host. Exposing `:2019` on a host-bound port to make it reachable would add a network attack surface with no benefit over the current approach.
### Caddyfile symlink contract
The deploy workflows reload Caddy to pick up committed Caddyfile changes. This relies on a symlink that must exist on the VPS:
```
/etc/caddy/Caddyfile → /opt/familienarchiv/infra/caddy/Caddyfile
```
Created once during server bootstrap (see `docs/DEPLOYMENT.md §3.1`). Verify with:
Gitea Actions requires self-hosted runners. GitHub Actions provides `ubuntu-latest` for free; on Gitea you run the runner yourself.
```bash
ls -la /etc/caddy/Caddyfile
# Expected: lrwxrwxrwx ... /etc/caddy/Caddyfile -> /opt/familienarchiv/infra/caddy/Caddyfile
# On the VPS — register a Gitea Actions runner
docker run -d --name gitea-runner --restart unless-stopped \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v gitea-runner-data:/data \
  -e GITEA_INSTANCE_URL=https://gitea.example.com \
  -e GITEA_RUNNER_REGISTRATION_TOKEN=<token-from-gitea-settings> \
  -e GITEA_RUNNER_NAME=vps-runner-1 \
  -e GITEA_RUNNER_LABELS=ubuntu-latest:docker://node:20-bullseye \
  gitea/act_runner:latest
```
### Troubleshooting: Reload Caddy step fails
**Failure mode 1 — Caddy is stopped**
Symptom in CI log:
```
Failed to reload caddy.service: Unit caddy.service is not active.
```
Recovery:
```bash
ssh root@<vps>
systemctl start caddy
systemctl status caddy # confirm Active: active (running)
```
Re-run the workflow via Gitea Actions → "Re-run workflow".
**Failure mode 2 — Caddyfile symlink is missing or mis-pointed**
This failure is silent — `systemctl reload caddy` exits 0 but Caddy reloads whatever `/etc/caddy/Caddyfile` currently resolves to. The smoke test may then pass against stale config.
Symptom: smoke test fails on the HSTS value or the `/actuator/health → 404` check despite the Reload Caddy step succeeding.
Diagnosis:
```bash
ssh root@<vps>
ls -la /etc/caddy/Caddyfile
# Should be: lrwxrwxrwx ... /etc/caddy/Caddyfile -> /opt/familienarchiv/infra/caddy/Caddyfile
```
Recovery if symlink is wrong or missing:
```bash
ln -sf /opt/familienarchiv/infra/caddy/Caddyfile /etc/caddy/Caddyfile
systemctl reload caddy
```
**Failure mode 3 — nsenter / Docker socket unavailable**
Symptom in CI log:
```
docker: Cannot connect to the Docker daemon at unix:///var/run/docker.sock.
```
or
```
nsenter: failed to execute /bin/systemctl: No such file or directory
```
The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step.
The runner label `ubuntu-latest` maps to the Docker image it uses -- this is how `runs-on: ubuntu-latest` in the workflow YAML continues to work unchanged.
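So a workflow keeps the familiar syntax (sketch):
```yaml
# Unchanged workflow syntax: on this runner the label resolves to node:20-bullseye
jobs:
  build:
    runs-on: ubuntu-latest
```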
---
@@ -200,7 +107,7 @@ jobs:
working-directory: frontend
- name: Upload screenshots
if: always()
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade.
uses: actions/upload-artifact@v4 # ← upgraded from v3
with:
name: unit-test-screenshots
path: frontend/test-results/screenshots/
@@ -227,7 +134,7 @@ jobs:
working-directory: backend
- name: Upload test results
if: always()
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade.
uses: actions/upload-artifact@v4 # ← upgraded from v3
with:
name: backend-test-results
path: backend/target/surefire-reports/
@@ -259,7 +166,7 @@ jobs:
timeout 30 bash -c \
'until docker compose -f docker-compose.yml -f docker-compose.ci.yml exec -T db pg_isready -U archive_user; do sleep 2; done'
- name: Connect job container to compose network
run: docker network connect familienarchiv_archiv-net $(cat /etc/hostname)
run: docker network connect familienarchiv_archive-net $(cat /etc/hostname)
- uses: actions/setup-java@v4
with:
java-version: '21'
@@ -329,7 +236,7 @@ jobs:
E2E_BACKEND_URL: http://localhost:8080
- name: Upload E2E results
if: always()
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade.
uses: actions/upload-artifact@v4 # ← upgraded from v3
with:
name: e2e-results
path: frontend/test-results/e2e/

View File

@@ -1,22 +1,214 @@
# Production Docker Compose & Infrastructure
This document covers VPS sizing, monthly cost, and the Hetzner ecosystem rationale. The compose file and Caddyfile that previously lived inline in this doc are now committed to the repo root.
> **Where to find the live files (after #497)**
> - Production compose: [`docker-compose.prod.yml`](../../docker-compose.prod.yml) (standalone, not an overlay)
> - Caddyfile: [`infra/caddy/Caddyfile`](../../infra/caddy/Caddyfile)
> - Deploy workflows: [`.gitea/workflows/nightly.yml`](../../.gitea/workflows/nightly.yml) and [`.gitea/workflows/release.yml`](../../.gitea/workflows/release.yml)
> - Bootstrap checklist, secrets, rollback procedure: [`docs/DEPLOYMENT.md`](../DEPLOYMENT.md)
The original spec in this doc proposed an overlay pattern (`docker compose -f docker-compose.yml -f docker-compose.prod.yml`) with MinIO disabled in production in favour of Hetzner Object Storage. That approach was retired in #497 in favour of a standalone prod compose that keeps MinIO self-hosted on the VPS. The Hetzner OBS migration is tracked as a future follow-up; the swap is three env vars + `mc mirror` once we decide to do it.
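A sketch of that eventual swap (the `mc` alias, bucket names, and credential variables are illustrative):
```bash
# Mirror existing objects from the on-VPS MinIO to Hetzner Object Storage
mc alias set hetzner https://fsn1.your-objectstorage.com "$OBS_ACCESS_KEY" "$OBS_SECRET_KEY"
mc mirror local/archive hetzner/archive

# Then repoint the backend (the "three env vars"):
# S3_ENDPOINT=https://fsn1.your-objectstorage.com
# S3_ACCESS_KEY=...   S3_SECRET_KEY=...
```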
This document contains the full production Docker Compose file, Caddyfile, VPS sizing recommendations, cost breakdown, and Hetzner ecosystem overview.
---
## Observability stack — not yet deployed
## Full docker-compose.prod.yml
Prometheus, Loki, Grafana, Alertmanager, Uptime Kuma, GlitchTip and ntfy are **not** part of the production deployment that #497 landed. They are tracked as follow-up issue #498.
Usage: `docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d`
When that lands, the observability containers will join `docker-compose.prod.yml` under a dedicated profile so they can be operated alongside the application stack without affecting the application containers' restart cycle.
```yaml
# docker-compose.prod.yml
# Usage: docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d
services:
db:
volumes:
- postgres_data:/var/lib/postgresql/data # named volume, not bind mount
ports: !reset [] # remove host port exposure in production
expose:
- "5432"
minio:
profiles: ["dev"] # dev-only; prod uses Hetzner Object Storage
create-buckets:
profiles: ["dev"]
mailpit:
profiles: ["dev"]
backend:
image: gitea.example.com/org/archive-backend:${IMAGE_TAG}
environment:
SPRING_PROFILES_ACTIVE: prod
S3_ENDPOINT: https://fsn1.your-objectstorage.com
MAIL_HOST: ${MAIL_HOST}
MAIL_PORT: 587
SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: "true"
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: "true"
ports: !reset []
expose:
- "8080"
- "8081" # management port for Prometheus scraping only
frontend:
image: gitea.example.com/org/archive-frontend:${IMAGE_TAG}
ports: !reset []
expose:
- "3000"
caddy:
image: caddy:2-alpine
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- "443:443/udp"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- caddy_data:/data
- caddy_config:/config
# ── Observability ──────────────────────────────────────────────────────────
prometheus:
image: prom/prometheus:v2.51.0 # pinned
restart: unless-stopped
volumes:
- ./observability/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
expose: ["9090"]
grafana:
image: grafana/grafana:10.4.0 # pinned
restart: unless-stopped
environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD}
GF_PATHS_PROVISIONING: /etc/grafana/provisioning
GF_SERVER_ROOT_URL: https://grafana.example.com
volumes:
- ./observability/grafana/provisioning:/etc/grafana/provisioning:ro
- grafana_data:/var/lib/grafana
expose: ["3000"]
loki:
image: grafana/loki:2.9.0 # pinned
restart: unless-stopped
volumes:
- ./observability/loki-config.yml:/etc/loki/config.yml:ro
- loki_data:/loki
expose: ["3100"]
promtail:
image: grafana/promtail:2.9.0 # pinned
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./observability/promtail-config.yml:/etc/promtail/config.yml:ro
alertmanager:
image: prom/alertmanager:v0.27.0 # pinned
restart: unless-stopped
volumes:
- ./observability/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
expose: ["9093"]
# ── Uptime monitoring ──────────────────────────────────────────────────────
uptime-kuma:
image: louislam/uptime-kuma:1
restart: unless-stopped
volumes:
- uptime_kuma_data:/app/data
expose: ["3001"]
# ── Error tracking ─────────────────────────────────────────────────────────
glitchtip-web:
image: glitchtip/glitchtip:latest
restart: unless-stopped
depends_on: [db]
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db/${GLITCHTIP_DB}
SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
EMAIL_URL: smtp://${MAIL_USERNAME}:${MAIL_PASSWORD}@${MAIL_HOST}:587/?tls=true
GLITCHTIP_DOMAIN: https://errors.example.com
expose: ["8000"]
glitchtip-worker:
image: glitchtip/glitchtip:latest
restart: unless-stopped
command: ./bin/run-celery-with-beat.sh
depends_on: [glitchtip-web]
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db/${GLITCHTIP_DB}
SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
# ── Push notifications ─────────────────────────────────────────────────────
ntfy:
image: binwiederhier/ntfy:latest
restart: unless-stopped
volumes:
- ntfy_data:/var/lib/ntfy
- ./ntfy/server.yml:/etc/ntfy/server.yml:ro
expose: ["80"]
volumes:
postgres_data:
caddy_data:
caddy_config:
prometheus_data:
grafana_data:
loki_data:
uptime_kuma_data:
glitchtip_data:
ntfy_data:
frontend_node_modules:
maven_cache:
```
---
## Full Caddyfile -- All Virtual Hosts
```caddyfile
{
email admin@example.com
}
# Main application
app.example.com {
header {
Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
X-Content-Type-Options "nosniff"
X-Frame-Options "DENY"
Referrer-Policy "strict-origin-when-cross-origin"
-Server
}
@api path /api/*
reverse_proxy @api backend:8080
@actuator path /actuator/*
respond @actuator 404
reverse_proxy frontend:3000
}
# Gitea — source code and CI
git.example.com {
reverse_proxy gitea:3000
}
# Grafana — observability
grafana.example.com {
basicauth {
admin $2a$14$...
}
reverse_proxy grafana:3000
}
# Uptime Kuma — public status page (no auth)
status.example.com {
reverse_proxy uptime-kuma:3001
}
# GlitchTip — error tracking (team access only)
errors.example.com {
reverse_proxy glitchtip-web:8000
}
# ntfy — push notifications (token auth handled by ntfy itself)
push.example.com {
reverse_proxy ntfy:80
}
```
---
@@ -24,47 +216,61 @@ When that lands the observability containers will join `docker-compose.prod.yml`
### Recommended: Hetzner CX32
**Specs**: 4 vCPU, 8 GB RAM, 80 GB SSD · **Cost**: 17 EUR/mo
**Specs**: 4 vCPU, 8 GB RAM, 80 GB SSD
**Cost**: 17 EUR/mo
A CX32 is sufficient for the application stack (Postgres, MinIO, OCR with `mem_limit: 12g`, backend, frontend, Caddy) today. Once the observability stack lands (Prometheus/Loki/Grafana/Alertmanager add ~2 GB), consider a CX42.
This runs comfortably:
- SvelteKit (Node)
- Spring Boot (JVM -- needs ~512 MB minimum)
- PostgreSQL 16
- Caddy
- Prometheus + Grafana + Loki + Alertmanager (~2 GB)
- Gitea + Gitea runner
- Uptime Kuma
- GlitchTip + worker
- ntfy
### When to Upgrade: Hetzner CX42
**Specs**: 8 vCPU, 16 GB RAM · **Cost**: 29 EUR/mo
**Cost**: 29 EUR/mo
Upgrade when:
- Observability stack adds memory pressure (Loki + Grafana with >30 days retention)
- OCR throughput needs scaling beyond a single-node Surya/Kraken setup
- Real user load profiled in Grafana shows response-time degradation
- Loki log retention exceeds 30 days and RAM pressure appears
- GlitchTip error volume grows significantly
- Response times degrade under real user load (check Grafana first)
Never upgrade the VPS tier before profiling -- most perceived performance issues are application bugs, not resource constraints.
Never upgrade the VPS tier before profiling with Grafana -- most perceived performance issues are application bugs, not resource constraints.
---
## Monthly Cost Breakdown (production v1)
## Monthly Cost Breakdown
| Service | Cost |
|---|---|
| Hetzner CX32 VPS | 17.00 EUR |
| Hetzner DNS | 0.00 EUR |
| Hetzner Object Storage (~200 GB) | 5.00 EUR |
| Hetzner SMTP relay | ~1.00 EUR |
| **Total** | **~18 EUR/mo** |
| Hetzner DNS | 0.00 EUR |
| **Total** | **~23 EUR/mo** |
MinIO data lives on the VPS disk (no Object Storage line item yet). The Hetzner OBS migration would add ~5 EUR/mo at ~200 GB.
Everything else -- Gitea, Grafana, Prometheus, Loki, Uptime Kuma, GlitchTip, ntfy, Caddy, Let's Encrypt TLS -- runs on the VPS. Zero additional cost.
Equivalent SaaS stack: 200–300 EUR/mo.
Equivalent SaaS stack: 200-300 EUR/mo.
---
## Hetzner Ecosystem Rationale
## Hetzner Ecosystem Overview
Everything possible runs on Hetzner. One provider, one bill, GDPR-compliant by default (German company, EU data centres).
Everything possible runs on Hetzner. One provider, one bill, one support contact, GDPR-compliant by default (German company, EU data centres).
| Service | Use today |
### What Hetzner Provides
| Service | Description |
|---|---|
| **VPS (Cloud Servers)** | The whole application stack |
| **VPS (Cloud Servers)** | CX22 to CX52 -- the entire stack runs here |
| **Object Storage** | S3-compatible, replaces AWS S3 and MinIO in production |
| **DNS** | Free, supports A/AAAA/CNAME/MX/TXT, API-accessible for Caddy ACME |
| **Firewall** | Network-level firewall (in addition to host `ufw`) |
| **Snapshots** | Quick VPS rollback after a bad deploy (0.013 EUR/GB/mo) |
| **SMTP relay** | Transactional email from `noreply@raddatz.cloud` |
| **Object Storage** | Not used today — MinIO stays on-VPS. Available when we decide to migrate |
| **Firewall** | Built-in cloud firewall (use in addition to ufw, not instead of) |
| **Snapshots** | VPS snapshots for quick rollback after a bad deploy (0.013 EUR/GB/mo) |
| **Volumes** | Attachable block storage if the VPS disk fills up (0.048 EUR/GB/mo) |
| **SMTP relay** | Transactional email via your Hetzner account |

View File

@@ -40,7 +40,8 @@ src/
│ ├── profile/ # User profile settings
│ ├── users/[id]/ # Public user profile page
│ ├── login/ logout/ register/
── forgot-password/ reset-password/
── forgot-password/ reset-password/
│ └── demo/ # Dev-only demos
├── lib/ # Domain-based package structure (mirrors backend)
│ ├── document/ # Document domain: components, stores, services, utils
│ │ ├── annotation/ # Annotation overlay components
@@ -165,7 +166,7 @@ npm run check # svelte-check (type checking)
```bash
npm run test # Vitest unit + server tests (headless)
npm run test:coverage # Coverage report (server + client)
npm run test:coverage # Coverage report (server project only)
npm run test:e2e # Playwright E2E tests
npm run test:e2e:headed # Playwright E2E with visible browser
npm run test:e2e:ui # Playwright UI mode

View File

@@ -1,34 +1,15 @@
# syntax=docker/dockerfile:1.7
FROM node:20-alpine
# ── Development ──────────────────────────────────────────────────────────────
# Used by docker-compose.yml (target: development). Source is bind-mounted in
# dev so the COPY . below is effectively replaced at runtime; the layer still
# exists so the image is self-contained for cold starts (e.g. devcontainer).
FROM node:20.19.0-alpine3.21 AS development
WORKDIR /app
# Install dependencies as a separate layer so they are cached when only source changes
COPY package.json package-lock.json ./
RUN npm ci
# Source is mounted at runtime via docker-compose volume
# This COPY is only used when building without a volume (e.g. production image)
COPY . .
EXPOSE 5173
CMD ["npm", "run", "dev"]
# ── Build ────────────────────────────────────────────────────────────────────
# Compiles the SvelteKit Node-adapter output to /app/build.
FROM node:20.19.0-alpine3.21 AS build
WORKDIR /app
COPY package.json package-lock.json ./
RUN npm ci
COPY . .
RUN npm run build
# ── Production ───────────────────────────────────────────────────────────────
# Self-contained Node server. `node build` is the adapter-node entrypoint.
FROM node:20.19.0-alpine3.21 AS production
WORKDIR /app
ENV NODE_ENV=production
COPY --from=build /app/build ./build
COPY --from=build /app/package.json ./package.json
COPY --from=build /app/package-lock.json ./package-lock.json
RUN npm ci --omit=dev --ignore-scripts
EXPOSE 3000
CMD ["node", "build"]

View File

@@ -72,31 +72,6 @@ export default defineConfig(
]
}
},
{
files: ['**/*.spec.ts', '**/*.test.ts'],
rules: {
'no-restricted-syntax': [
'error',
{
selector:
"CallExpression[callee.object.name='vi'][callee.property.name='mock'] > Literal[value=/^pdfjs-dist/]",
message:
"Banned: vi.mock('pdfjs-dist', factory) causes a birpc teardown race in browser-mode specs — see ADR 012. Use the libLoader prop injection pattern instead."
},
{
// ADR 012 / #553. The named mechanism: an async vi.mock factory whose
// body performs `await import(...)` produces a late birpc roundtrip
// during worker teardown. The factory body must be synchronous; if
// you need to share state between the spec and the mock, use
// `vi.hoisted` (see DropZone.svelte.spec.ts).
selector:
"CallExpression[callee.object.name='vi'][callee.property.name='mock'][arguments.1.type='ArrowFunctionExpression'][arguments.1.async=true]:has(AwaitExpression > ImportExpression)",
message:
'Banned: vi.mock(..., async () => { await import(...) }) causes a birpc teardown race in browser-mode specs — see ADR 012. Use a synchronous factory + vi.hoisted instead.'
}
]
}
},
{
plugins: { boundaries },
settings: {

View File

@@ -345,11 +345,8 @@
"admin_system_import_btn_retry": "Erneut starten",
"admin_system_import_status_idle": "Kein Import gestartet.",
"admin_system_import_status_running": "Import läuft…",
"admin_system_import_status_done": "Import abgeschlossen",
"admin_system_import_status_done_label": "Dokumente verarbeitet",
"admin_system_import_status_failed": "Import fehlgeschlagen",
"admin_system_import_failed_no_spreadsheet": "Keine Tabellendatei gefunden.",
"admin_system_import_failed_internal": "Interner Fehler beim Import.",
"admin_system_import_status_done": "Import abgeschlossen {count} Dokumente verarbeitet.",
"admin_system_import_status_failed": "Fehler: {message}",
"admin_system_thumbnails_heading": "Thumbnails erzeugen",
"admin_system_thumbnails_description": "Erzeugt Vorschaubilder für Dokumente ohne Thumbnail (z. B. nach dem Massenimport).",
"admin_system_thumbnails_btn_start": "Thumbnails erzeugen",
@@ -706,8 +703,6 @@
"error_invite_exhausted": "Dieser Einladungslink wurde bereits vollständig verwendet.",
"error_invite_revoked": "Dieser Einladungslink wurde deaktiviert.",
"error_invite_expired": "Dieser Einladungslink ist abgelaufen.",
"error_group_has_active_invites": "Diese Gruppe kann nicht gelöscht werden, da sie in einer aktiven Einladung verwendet wird.",
"error_group_not_found": "Die angegebene Gruppe existiert nicht.",
"register_heading": "Konto erstellen",
"register_subtext": "Du wurdest eingeladen, dem Familienarchiv beizutreten.",
"register_label_first_name": "Vorname",
@@ -767,9 +762,6 @@
"admin_new_invite_prefill_last": "Nachname vorausfüllen (optional)",
"admin_new_invite_prefill_email": "E-Mail vorausfüllen (optional)",
"admin_new_invite_expires": "Ablaufdatum (optional)",
"admin_new_invite_groups": "Gruppen (optional)",
"admin_new_invite_no_groups": "Keine Gruppen vorhanden.",
"admin_invite_groups_load_error": "Gruppen konnten nicht geladen werden. Die Einladung kann ohne Gruppenauswahl erstellt werden.",
"admin_invite_created_title": "Einladung erstellt",
"admin_invite_created_desc": "Teile diesen Link mit der einzuladenden Person:",
"admin_invite_revoke_confirm": "Einladung wirklich widerrufen?",

View File

@@ -345,11 +345,8 @@
"admin_system_import_btn_retry": "Start again",
"admin_system_import_status_idle": "No import started.",
"admin_system_import_status_running": "Import running…",
"admin_system_import_status_done": "Import complete",
"admin_system_import_status_done_label": "Documents processed",
"admin_system_import_status_failed": "Import failed",
"admin_system_import_failed_no_spreadsheet": "No spreadsheet file found.",
"admin_system_import_failed_internal": "Import failed due to an internal error.",
"admin_system_import_status_done": "Import complete {count} documents processed.",
"admin_system_import_status_failed": "Error: {message}",
"admin_system_thumbnails_heading": "Generate thumbnails",
"admin_system_thumbnails_description": "Generates preview images for documents without a thumbnail (e.g. after the mass import).",
"admin_system_thumbnails_btn_start": "Generate thumbnails",
@@ -706,8 +703,6 @@
"error_invite_exhausted": "This invite link has already been fully used.",
"error_invite_revoked": "This invite link has been deactivated.",
"error_invite_expired": "This invite link has expired.",
"error_group_has_active_invites": "This group cannot be deleted because it is referenced by one or more active invite links.",
"error_group_not_found": "The specified group does not exist.",
"register_heading": "Create account",
"register_subtext": "You've been invited to join Familienarchiv.",
"register_label_first_name": "First name",
@@ -767,9 +762,6 @@
"admin_new_invite_prefill_last": "Pre-fill last name (optional)",
"admin_new_invite_prefill_email": "Pre-fill email (optional)",
"admin_new_invite_expires": "Expiry date (optional)",
"admin_new_invite_groups": "Groups (optional)",
"admin_new_invite_no_groups": "No groups exist.",
"admin_invite_groups_load_error": "Groups could not be loaded. The invite can still be created without group assignment.",
"admin_invite_created_title": "Invite created",
"admin_invite_created_desc": "Share this link with the person you are inviting:",
"admin_invite_revoke_confirm": "Really revoke this invite?",

View File

@@ -345,11 +345,8 @@
"admin_system_import_btn_retry": "Iniciar de nuevo",
"admin_system_import_status_idle": "No hay importación iniciada.",
"admin_system_import_status_running": "Importación en curso…",
"admin_system_import_status_done": "Importación completada",
"admin_system_import_status_done_label": "Documentos procesados",
"admin_system_import_status_failed": "Importación fallida",
"admin_system_import_failed_no_spreadsheet": "No se encontró ninguna hoja de cálculo.",
"admin_system_import_failed_internal": "Error interno durante la importación.",
"admin_system_import_status_done": "Importación completada {count} documentos procesados.",
"admin_system_import_status_failed": "Error: {message}",
"admin_system_thumbnails_heading": "Generar miniaturas",
"admin_system_thumbnails_description": "Genera imágenes de vista previa para documentos sin miniatura (p. ej. tras la importación masiva).",
"admin_system_thumbnails_btn_start": "Generar miniaturas",
@@ -706,8 +703,6 @@
"error_invite_exhausted": "Este enlace de invitación ya ha sido completamente utilizado.",
"error_invite_revoked": "Este enlace de invitación ha sido desactivado.",
"error_invite_expired": "Este enlace de invitación ha expirado.",
"error_group_has_active_invites": "Este grupo no puede eliminarse porque está referenciado por uno o más enlaces de invitación activos.",
"error_group_not_found": "El grupo especificado no existe.",
"register_heading": "Crear cuenta",
"register_subtext": "Has sido invitado a unirte al Familienarchiv.",
"register_label_first_name": "Nombre",
@@ -767,9 +762,6 @@
"admin_new_invite_prefill_last": "Prellenar apellido (opcional)",
"admin_new_invite_prefill_email": "Prellenar correo (opcional)",
"admin_new_invite_expires": "Fecha de vencimiento (opcional)",
"admin_new_invite_groups": "Grupos (opcional)",
"admin_new_invite_no_groups": "No hay grupos disponibles.",
"admin_invite_groups_load_error": "No se pudieron cargar los grupos. La invitación puede crearse sin asignar grupos.",
"admin_invite_created_title": "Invitación creada",
"admin_invite_created_desc": "Comparte este enlace con la persona invitada:",
"admin_invite_revoke_confirm": "¿Realmente revocar esta invitación?",

File diff suppressed because it is too large

View File

@@ -8,7 +8,6 @@
"build": "vite build",
"preview": "vite preview",
"prepare": "svelte-kit sync || true && git -C .. config core.hooksPath .husky 2>/dev/null || true",
"postinstall": "patch-package",
"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
"format": "prettier --write .",
@@ -16,7 +15,7 @@
"lint:boundary-demo": "eslint src/lib/tag/__fixtures__/",
"test:unit": "vitest",
"test": "npm run test:unit -- --run",
"test:coverage": "vitest run --coverage --project=server; vitest run -c vitest.client-coverage.config.ts --coverage",
"test:coverage": "vitest run --coverage --project=server",
"test:e2e": "playwright test",
"test:e2e:headed": "playwright test --headed",
"test:e2e:ui": "playwright test --ui",
@@ -46,7 +45,6 @@
"@types/diff": "^7.0.2",
"@types/node": "^24",
"@vitest/browser-playwright": "^4.0.10",
"@vitest/coverage-istanbul": "^4.1.0",
"@vitest/coverage-v8": "^4.1.0",
"eslint": "^9.39.1",
"eslint-config-prettier": "^10.1.8",
@@ -55,7 +53,6 @@
"eslint-plugin-svelte": "^3.13.0",
"globals": "^16.5.0",
"openapi-typescript": "^7.8.0",
"patch-package": "^8.0.0",
"playwright": "^1.56.1",
"prettier": "^3.6.2",
"prettier-plugin-svelte": "^3.4.0",

View File

@@ -1,62 +0,0 @@
diff --git a/node_modules/@vitest/browser-playwright/dist/index.js b/node_modules/@vitest/browser-playwright/dist/index.js
index 5d0d37b..821d7b4 100644
--- a/node_modules/@vitest/browser-playwright/dist/index.js
+++ b/node_modules/@vitest/browser-playwright/dist/index.js
@@ -935,7 +935,7 @@ class PlaywrightBrowserProvider {
createMocker() {
const idPreficates = new Map();
const sessionIds = new Map();
- function createPredicate(sessionId, url) {
+ function createPredicate(url) {
const moduleUrl = new URL(url, "http://localhost");
const predicate = (url) => {
if (url.searchParams.has("_vitest_original")) {
@@ -960,11 +960,7 @@ class PlaywrightBrowserProvider {
}
return true;
};
- const ids = sessionIds.get(sessionId) || [];
- ids.push(moduleUrl.href);
- sessionIds.set(sessionId, ids);
- idPreficates.set(predicateKey(sessionId, moduleUrl.href), predicate);
- return predicate;
+ return { url: moduleUrl.href, predicate };
}
function predicateKey(sessionId, url) {
return `${sessionId}:${url}`;
@@ -972,7 +968,23 @@ class PlaywrightBrowserProvider {
return {
register: async (sessionId, module) => {
const page = this.getPage(sessionId);
- await page.context().route(createPredicate(sessionId, module.url), async (route) => {
+ const { url: moduleUrl, predicate } = createPredicate(module.url);
+ const key = predicateKey(sessionId, moduleUrl);
+ // Backport of vitest PR #10267: if a route handler is already
+ // registered for this resolved module URL in this session,
+ // unroute it before installing the new one. Without this guard,
+ // duplicate-id mocks (e.g. '$lib/foo.svelte' + '$lib/foo.svelte.js')
+ // leak an orphan route whose handler crashes after the next
+ // session's birpc channel closes.
+ const existingPredicate = idPreficates.get(key);
+ if (existingPredicate) {
+ await page.context().unroute(existingPredicate);
+ }
+ const ids = sessionIds.get(sessionId) ?? new Set();
+ ids.add(moduleUrl);
+ sessionIds.set(sessionId, ids);
+ idPreficates.set(key, predicate);
+ await page.context().route(predicate, async (route) => {
if (module.type === "manual") {
const exports$1 = Object.keys(await module.resolve());
const body = createManualModuleSource(module.url, exports$1);
@@ -1033,8 +1045,8 @@ class PlaywrightBrowserProvider {
},
clear: async (sessionId) => {
const page = this.getPage(sessionId);
- const ids = sessionIds.get(sessionId) || [];
- const promises = ids.map((id) => {
+ const ids = sessionIds.get(sessionId) ?? new Set();
+ const promises = [...ids].map((id) => {
const key = predicateKey(sessionId, id);
const predicate = idPreficates.get(key);
if (predicate) {


@@ -1,20 +0,0 @@
import { describe, it, expect } from 'vitest';
// Browser-mode tests must run with SvelteKit's hover-prefetch disabled.
// Hover-prefetch fires real `fetch` requests for the target route's loader
// chunks; those go through the same Playwright route handler that serves
// mocked modules. Even after `cleanup()` tears down the iframe, an in-flight
// prefetch can still hit the handler — and if the worker's birpc channel has
// closed by then, the handler raises an unhandled rejection. ADR-012 / #553.
//
// This test enforces that the test-setup file ran and switched preload-data
// off on `document.body` before any spec started rendering.
describe('browser test setup', () => {
it('disables SvelteKit loader-data prefetch on document.body', () => {
expect(document.body.dataset.sveltekitPreloadData).toBe('off');
});
it('disables SvelteKit route-code prefetch on document.body', () => {
expect(document.body.dataset.sveltekitPreloadCode).toBe('off');
});
});
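What the asserted setup file presumably does, as a minimal sketch (hypothetical filename and wiring; the real setup file is not part of this excerpt):
// vitest.browser-setup.ts (hypothetical name), registered via test.setupFiles.
// Setting these flags on <body> opts every descendant link out of SvelteKit's
// hover/tap prefetch, so no loader fetch can race the Playwright mock routes.
document.body.dataset.sveltekitPreloadData = 'off';
document.body.dataset.sveltekitPreloadCode = 'off';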


@@ -1,82 +0,0 @@
import { describe, it, expect } from 'vitest';
import { readdirSync, readFileSync } from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
// Belt-and-braces detector for the birpc teardown race named in ADR-012 / #553.
// ESLint catches the pattern at save time, CI grep catches it before the test
// suite launches, and this in-suite test catches it at every vitest invocation —
// the layer hardest to disable or scope around.
//
// We scan source text rather than parsing AST: fast, no parser dependency,
// good enough for the named anti-pattern. The pattern matches
// `vi.mock(<arg>, async ... { ... await import(...) ... })`.
const ASYNC_MOCK_WITH_DYNAMIC_IMPORT = /vi\.mock\([^)]*,\s*async[^{]*\{[\s\S]*?await\s+import\s*\(/;
export function hasAsyncMockFactoryWithDynamicImport(source: string): boolean {
return ASYNC_MOCK_WITH_DYNAMIC_IMPORT.test(source);
}
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const SRC_ROOT = path.resolve(__dirname, '..');
function findBrowserSpecs(): string[] {
const entries = readdirSync(SRC_ROOT, { recursive: true, withFileTypes: true });
return entries
.filter(
(e) =>
e.isFile() && (e.name.endsWith('.svelte.test.ts') || e.name.endsWith('.svelte.spec.ts'))
)
.map((e) => path.join(e.parentPath ?? (e as { path: string }).path, e.name));
}
describe('scan: hasAsyncMockFactoryWithDynamicImport', () => {
it('flags async vi.mock factory with await import in body', () => {
const fixture = `vi.mock('$app/stores', async () => {
const mod = await import('./__mocks__/navigatingStore');
return { navigating: mod.navigatingStore };
});`;
expect(hasAsyncMockFactoryWithDynamicImport(fixture)).toBe(true);
});
it('does not flag sync vi.mock factory', () => {
const fixture = `vi.mock('$app/state', () => ({ navigating: { type: null } }));`;
expect(hasAsyncMockFactoryWithDynamicImport(fixture)).toBe(false);
});
it('does not flag async vi.mock factory without dynamic import', () => {
const fixture = `vi.mock('foo', async () => {
const x = await Promise.resolve(42);
return { bar: x };
});`;
expect(hasAsyncMockFactoryWithDynamicImport(fixture)).toBe(false);
});
it('does not flag dynamic import outside any vi.mock', () => {
const fixture = `async function load() {
const mod = await import('./something');
return mod.default;
}`;
expect(hasAsyncMockFactoryWithDynamicImport(fixture)).toBe(false);
});
it('flags async factory written as async function expression', () => {
const fixture = `vi.mock('foo', async function () {
const mod = await import('./bar');
return mod;
});`;
expect(hasAsyncMockFactoryWithDynamicImport(fixture)).toBe(true);
});
});
describe('browser specs: no async vi.mock factory contains await import', () => {
it('every src/**/*.svelte.{test,spec}.ts file is clean', () => {
const specFiles = findBrowserSpecs();
expect(specFiles.length).toBeGreaterThan(0);
const offenders = specFiles.filter((file) =>
hasAsyncMockFactoryWithDynamicImport(readFileSync(file, 'utf-8'))
);
expect(offenders).toEqual([]);
});
});
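The save-time ESLint layer mentioned in the header comment can be expressed with no-restricted-syntax; a hypothetical flat-config excerpt (the project's actual rule and selector may differ):
// eslint.config.js excerpt (hypothetical): flags the same shape at edit time.
export default [
	{
		files: ['src/**/*.{test,spec}.ts'],
		rules: {
			'no-restricted-syntax': [
				'error',
				{
					selector:
						"CallExpression[callee.object.name='vi'][callee.property.name='mock'] > :function[async=true] AwaitExpression > ImportExpression",
					message: 'async vi.mock factory with dynamic import (ADR-012 / #553)'
				}
			]
		}
	}
];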


@@ -1,130 +0,0 @@
import { describe, it, expect } from 'vitest';
import { readdirSync, readFileSync } from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
// Belt-and-braces detector for the duplicate-id birpc race named in
// ADR-012 / #553. When the same resolved module URL is mocked via two
// distinct vi.mock id strings (e.g. '$lib/foo.svelte' and
// '$lib/foo.svelte.js'), @vitest/browser-playwright registers two
// Playwright routes against one cleanup slot — the orphan survives, fires
// after the next session's birpc closes, and crashes the run with
// "[birpc] rpc is closed, cannot call resolveManualMock".
//
// Fixed upstream in vitest PR #10267; until that fix reaches a published
// release, normalisation in user-land is the practical guard. This test
// catches the pattern at every vitest invocation — the layer hardest to
// disable or scope around.
const VI_MOCK_ID = /vi\.mock\(\s*['"]([^'"]+)['"]/g;
function extractMockIds(source: string): string[] {
const ids: string[] = [];
for (const match of source.matchAll(VI_MOCK_ID)) {
ids.push(match[1]);
}
return ids;
}
function canonicalise(id: string): string {
if (id.endsWith('.svelte.js')) return id.slice(0, -3);
if (id.endsWith('.svelte.ts')) return id.slice(0, -3);
return id;
}
export function findDuplicateMockIds(
specSources: Record<string, string>
): Map<string, Set<string>> {
const byCanonical = new Map<string, Set<string>>();
for (const source of Object.values(specSources)) {
for (const raw of extractMockIds(source)) {
const canonical = canonicalise(raw);
const existing = byCanonical.get(canonical) ?? new Set<string>();
existing.add(raw);
byCanonical.set(canonical, existing);
}
}
const duplicates = new Map<string, Set<string>>();
for (const [canonical, raws] of byCanonical) {
if (raws.size >= 2) duplicates.set(canonical, raws);
}
return duplicates;
}
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const SRC_ROOT = path.resolve(__dirname, '..');
function findBrowserSpecs(): string[] {
const entries = readdirSync(SRC_ROOT, { recursive: true, withFileTypes: true });
return entries
.filter(
(e) =>
e.isFile() && (e.name.endsWith('.svelte.test.ts') || e.name.endsWith('.svelte.spec.ts'))
)
.map((e) => path.join(e.parentPath ?? (e as { path: string }).path, e.name));
}
describe('scan: findDuplicateMockIds', () => {
it('flags two specs mocking the same module under .svelte and .svelte.js', () => {
const dup = findDuplicateMockIds({
'a.spec.ts': `vi.mock('$lib/foo.svelte', () => ({}));`,
'b.spec.ts': `vi.mock('$lib/foo.svelte.js', () => ({}));`
});
expect(dup.get('$lib/foo.svelte')).toEqual(new Set(['$lib/foo.svelte', '$lib/foo.svelte.js']));
});
it('does not flag two specs both using $lib/foo.svelte', () => {
const dup = findDuplicateMockIds({
'a.spec.ts': `vi.mock('$lib/foo.svelte', () => ({}));`,
'b.spec.ts': `vi.mock('$lib/foo.svelte', () => ({}));`
});
expect(dup.size).toBe(0);
});
it('does not flag $app/state and $app/stores (different modules)', () => {
const dup = findDuplicateMockIds({
'a.spec.ts': `vi.mock('$app/state', () => ({}));`,
'b.spec.ts': `vi.mock('$app/stores', () => ({}));`
});
expect(dup.size).toBe(0);
});
it('does not flag $lib/foo and $lib/bar (different canonical paths)', () => {
const dup = findDuplicateMockIds({
'a.spec.ts': `vi.mock('$lib/foo', () => ({}));`,
'b.spec.ts': `vi.mock('$lib/bar', () => ({}));`
});
expect(dup.size).toBe(0);
});
it('flags both spellings within a single file', () => {
const dup = findDuplicateMockIds({
'a.spec.ts': `
vi.mock('$lib/foo.svelte', () => ({}));
vi.mock('$lib/foo.svelte.js', () => ({}));
`
});
expect(dup.get('$lib/foo.svelte')?.size).toBe(2);
});
it('canonicalises .svelte.ts the same way as .svelte.js', () => {
const dup = findDuplicateMockIds({
'a.spec.ts': `vi.mock('$lib/foo.svelte', () => ({}));`,
'b.spec.ts': `vi.mock('$lib/foo.svelte.ts', () => ({}));`
});
expect(dup.get('$lib/foo.svelte')?.size).toBe(2);
});
});
describe('browser specs: no duplicate-id vi.mock calls across the suite', () => {
it('every mocked module is referenced under exactly one id string', () => {
const specFiles = findBrowserSpecs();
expect(specFiles.length).toBeGreaterThan(0);
const sources = Object.fromEntries(
specFiles.map((file) => [file, readFileSync(file, 'utf-8')])
);
const duplicates = findDuplicateMockIds(sources);
const report = Object.fromEntries([...duplicates].map(([k, v]) => [k, [...v]]));
expect(report).toEqual({});
});
});
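Until the upstream fix ships, the user-land normalisation this test enforces is simply to pick one spelling per module and use it in every spec; a sketch of the canonical form (the module id and stub are placeholders):
import { vi } from 'vitest';
// Canonical: mock the '.svelte' id, and use that exact spelling everywhere.
vi.mock('$lib/foo.svelte', () => ({ default: () => null }));
// Never additionally mock '$lib/foo.svelte.js' or '$lib/foo.svelte.ts' elsewhere:
// both resolve to the same module and would register two Playwright routes.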


@@ -5,14 +5,7 @@ import { env } from 'process';
import { cookieName, cookieMaxAge } from '$lib/paraglide/runtime';
import { detectLocale } from '$lib/shared/server/locale';
const PUBLIC_PATHS = [
'/login',
'/logout',
'/forgot-password',
'/reset-password',
'/register',
'/hilfe/transkription' // prerendered help page — must be reachable without an auth cookie
];
const PUBLIC_PATHS = ['/login', '/logout', '/forgot-password', '/reset-password', '/register'];
const handleLocaleDetection: Handle = ({ event, resolve }) => {
if (!event.cookies.get(cookieName)) {
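For orientation, PUBLIC_PATHS is the allow-list an auth handle checks before redirecting SSR requests to /login; a minimal hypothetical sketch of that consumer (the project's real auth handle and cookie name are outside this hunk):
import { redirect, type Handle } from '@sveltejs/kit';
const handleAuth: Handle = async ({ event, resolve }) => {
	const isPublic = PUBLIC_PATHS.some((p) => event.url.pathname.startsWith(p));
	if (!isPublic && !event.cookies.get('session')) {
		// 'session' is an invented cookie name. This only runs for SSR requests;
		// prerendered static files are served without ever hitting hooks.
		redirect(303, '/login');
	}
	return resolve(event);
};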


@@ -1,56 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import ChronikEmptyState from './ChronikEmptyState.svelte';
afterEach(cleanup);
describe('ChronikEmptyState', () => {
it('renders the first-run title and body and the clock icon', async () => {
render(ChronikEmptyState, { props: { variant: 'first-run' as const } });
await expect.element(page.getByText('Noch nichts geschehen')).toBeVisible();
await expect.element(page.getByText(/sobald jemand aus der familie/i)).toBeVisible();
const wrapper = document.querySelector('[data-testid="chronik-empty-state"]');
expect(wrapper?.getAttribute('data-variant')).toBe('first-run');
});
it('renders the filter-empty title and body', async () => {
render(ChronikEmptyState, { props: { variant: 'filter-empty' as const } });
await expect.element(page.getByText('Nichts in dieser Ansicht')).toBeVisible();
await expect.element(page.getByText('In diesem Filter gibt es keine Einträge.')).toBeVisible();
const wrapper = document.querySelector('[data-testid="chronik-empty-state"]');
expect(wrapper?.getAttribute('data-variant')).toBe('filter-empty');
});
it('renders the inbox-zero title and no body paragraph', async () => {
render(ChronikEmptyState, { props: { variant: 'inbox-zero' as const } });
await expect.element(page.getByText('Keine neuen Erwähnungen')).toBeVisible();
// Only one <p> (the title) since body is empty
const wrapper = document.querySelector('[data-testid="chronik-empty-state"]');
const paragraphs = wrapper?.querySelectorAll('p');
expect(paragraphs?.length).toBe(1);
expect(wrapper?.getAttribute('data-variant')).toBe('inbox-zero');
});
it('uses the accent color icon for inbox-zero (vs ink-3 for others)', async () => {
render(ChronikEmptyState, { props: { variant: 'inbox-zero' as const } });
const wrapper = document.querySelector('[data-testid="chronik-empty-state"]');
const svg = wrapper?.querySelector('svg');
expect(svg?.getAttribute('class')).toContain('text-accent');
});
it('uses the ink-3 color icon for first-run', async () => {
render(ChronikEmptyState, { props: { variant: 'first-run' as const } });
const wrapper = document.querySelector('[data-testid="chronik-empty-state"]');
const svg = wrapper?.querySelector('svg');
expect(svg?.getAttribute('class')).toContain('text-ink-3');
});
});


@@ -1,37 +0,0 @@
import { describe, it, expect, vi, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import ChronikErrorCard from './ChronikErrorCard.svelte';
afterEach(cleanup);
describe('ChronikErrorCard', () => {
it('renders the default error message when no message is supplied', async () => {
render(ChronikErrorCard, { props: { onRetry: () => {} } });
await expect.element(page.getByText(/Aktivitäten konnten nicht/i)).toBeVisible();
});
it('renders the supplied message when provided', async () => {
render(ChronikErrorCard, {
props: { onRetry: () => {}, message: 'Custom error message' }
});
await expect.element(page.getByText('Custom error message')).toBeVisible();
});
it('calls onRetry when the retry button is clicked', async () => {
const onRetry = vi.fn();
render(ChronikErrorCard, { props: { onRetry } });
await page.getByRole('button', { name: /erneut versuchen/i }).click();
expect(onRetry).toHaveBeenCalledOnce();
});
it('marks the card as role="alert" for assistive tech', async () => {
render(ChronikErrorCard, { props: { onRetry: () => {} } });
await expect.element(page.getByRole('alert')).toBeVisible();
});
});


@@ -1,53 +0,0 @@
import { describe, it, expect, vi, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import ChronikFilterPills from './ChronikFilterPills.svelte';
afterEach(cleanup);
describe('ChronikFilterPills', () => {
it('renders the radiogroup with the label', async () => {
render(ChronikFilterPills, { props: { value: 'alle' as const, onChange: () => {} } });
await expect
.element(page.getByRole('radiogroup', { name: /aktivitäten filtern/i }))
.toBeVisible();
});
it('renders all five filter pills', async () => {
render(ChronikFilterPills, { props: { value: 'alle' as const, onChange: () => {} } });
const radios = document.querySelectorAll('[role="radio"]');
expect(radios.length).toBe(5);
});
it('marks the active filter as aria-checked=true', async () => {
render(ChronikFilterPills, { props: { value: 'fuer-dich' as const, onChange: () => {} } });
const active = document.querySelector('[data-filter-value="fuer-dich"]') as HTMLElement;
expect(active.getAttribute('aria-checked')).toBe('true');
});
it('sets tabindex=0 on the active pill and -1 on others', async () => {
render(ChronikFilterPills, { props: { value: 'kommentare' as const, onChange: () => {} } });
const active = document.querySelector('[data-filter-value="kommentare"]') as HTMLElement;
const others = Array.from(document.querySelectorAll('[role="radio"]')).filter(
(el) => el !== active
) as HTMLElement[];
expect(active.tabIndex).toBe(0);
others.forEach((el) => expect(el.tabIndex).toBe(-1));
});
it('calls onChange with the new filter value when clicked', async () => {
const onChange = vi.fn();
render(ChronikFilterPills, { props: { value: 'alle' as const, onChange } });
const transcription = document.querySelector(
'[data-filter-value="transkription"]'
) as HTMLElement;
transcription.click();
expect(onChange).toHaveBeenCalledWith('transkription');
});
});


@@ -79,7 +79,7 @@ function href(n: NotificationItem): string {
<ul role="list" class="flex flex-col gap-2">
{#each unread as n (n.id)}
<li
class="chronik-fade-in group flex items-start gap-3 rounded-sm p-2 transition-colors hover:bg-canvas"
class="fade-in group flex items-start gap-3 rounded-sm p-2 transition-colors hover:bg-canvas"
>
<a
href={href(n)}
@@ -124,3 +124,26 @@ function href(n: NotificationItem): string {
</ul>
{/if}
</section>
<style>
.fade-in {
animation: chronik-fade-in 160ms ease-out;
}
@keyframes chronik-fade-in {
from {
opacity: 0;
transform: translateY(-4px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
@media (prefers-reduced-motion: reduce) {
.fade-in {
animation: none;
}
}
</style>


@@ -1,132 +0,0 @@
import { describe, it, expect, vi, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import ChronikFuerDichBox from './ChronikFuerDichBox.svelte';
import type { NotificationItem } from '$lib/notification/notifications';
afterEach(cleanup);
const mention = (overrides: Partial<NotificationItem> = {}): NotificationItem => ({
id: 'n-1',
type: 'MENTION',
documentId: 'doc-1',
referenceId: 'ref-1',
annotationId: null,
read: false,
createdAt: new Date().toISOString(),
actorName: 'Anna',
documentTitle: 'Brief 1899',
...overrides
});
describe('ChronikFuerDichBox', () => {
it('renders the inbox-zero state when there are no unread', async () => {
render(ChronikFuerDichBox, {
props: { unread: [], onMarkRead: () => {}, onMarkAllRead: () => {} }
});
await expect.element(page.getByText(/keine neuen erwähnungen/i)).toBeVisible();
const link = document.querySelector('a[href="/aktivitaeten?filter=fuer-dich"]');
expect(link).not.toBeNull();
});
it('renders the count badge with the unread count', async () => {
render(ChronikFuerDichBox, {
props: {
unread: [mention(), mention({ id: 'n-2' }), mention({ id: 'n-3' })],
onMarkRead: () => {},
onMarkAllRead: () => {}
}
});
const badge = document.querySelector('[data-testid="chronik-fuerdich-count"]');
expect(badge?.textContent).toContain('3');
});
it('uses the @ glyph for MENTION and ↩ for REPLY', async () => {
render(ChronikFuerDichBox, {
props: {
unread: [mention({ id: 'n-m', type: 'MENTION' }), mention({ id: 'n-r', type: 'REPLY' })],
onMarkRead: () => {},
onMarkAllRead: () => {}
}
});
const items = document.querySelectorAll('ul[role="list"] li');
expect(items.length).toBe(2);
expect(items[0].textContent).toContain('@');
expect(items[1].textContent).toContain('↩');
});
it('renders MENTION verb text from paraglide messages', async () => {
render(ChronikFuerDichBox, {
props: {
unread: [mention({ actorName: 'Bertha' })],
onMarkRead: () => {},
onMarkAllRead: () => {}
}
});
await expect
.element(page.getByText(/bertha hat dich in einem kommentar erwähnt/i))
.toBeVisible();
});
it('renders REPLY verb text from paraglide messages', async () => {
render(ChronikFuerDichBox, {
props: {
unread: [mention({ type: 'REPLY', actorName: 'Carl' })],
onMarkRead: () => {},
onMarkAllRead: () => {}
}
});
await expect
.element(page.getByText(/carl hat auf deinen kommentar geantwortet/i))
.toBeVisible();
});
it('calls onMarkRead with the notification when its dismiss button is clicked', async () => {
const onMarkRead = vi.fn();
const item = mention({ id: 'n-7' });
render(ChronikFuerDichBox, {
props: { unread: [item], onMarkRead, onMarkAllRead: () => {} }
});
const dismiss = document.querySelector(
'[data-testid="chronik-fuerdich-dismiss"]'
) as HTMLElement;
dismiss.click();
expect(onMarkRead).toHaveBeenCalledWith(item);
});
it('calls onMarkAllRead when the mark-all-read button is clicked', async () => {
const onMarkAllRead = vi.fn();
render(ChronikFuerDichBox, {
props: {
unread: [mention()],
onMarkRead: () => {},
onMarkAllRead
}
});
const btn = document.querySelector('[data-testid="chronik-mark-all-read"]') as HTMLElement;
btn.click();
expect(onMarkAllRead).toHaveBeenCalledOnce();
});
it('builds a deep-link href to the comment for each notification', async () => {
render(ChronikFuerDichBox, {
props: {
unread: [mention({ documentId: 'doc-x', referenceId: 'ref-y', annotationId: null })],
onMarkRead: () => {},
onMarkAllRead: () => {}
}
});
const link = document.querySelector('ul[role="list"] li a') as HTMLAnchorElement;
expect(link.getAttribute('href')).toContain('doc-x');
});
});


@@ -1,117 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import ChronikRow from './ChronikRow.svelte';
afterEach(cleanup);
const baseActor = { id: 'a1', name: 'Anna Schmidt', initials: 'AS', color: '#012851' };
const makeItem = (overrides: Record<string, unknown> = {}) => ({
id: 'i1',
kind: 'TEXT_SAVED' as string,
actor: baseActor as null | typeof baseActor,
documentId: 'd1',
documentTitle: 'Brief 1923',
count: 1,
happenedAt: '2026-04-15T10:00:00Z',
happenedAtUntil: null as string | null,
commentId: null as string | null,
commentPreview: null as string | null,
annotationId: null as string | null,
youMentioned: false,
...overrides
});
describe('ChronikRow', () => {
it('renders the actor avatar with initials when actor is present', async () => {
render(ChronikRow, { props: { item: makeItem() } });
expect(document.body.textContent).toContain('AS');
});
it('renders the question-mark fallback avatar when actor is null', async () => {
render(ChronikRow, { props: { item: makeItem({ actor: null }) } });
const fallback = document.querySelector('[data-testid="chronik-avatar-fallback"]');
expect(fallback).not.toBeNull();
});
it('renders the for-you marker when youMentioned is true', async () => {
render(ChronikRow, { props: { item: makeItem({ youMentioned: true }) } });
const marker = document.querySelector('[data-testid="chronik-foryou-marker"]');
expect(marker).not.toBeNull();
});
it('renders the for-you data-variant when youMentioned is true', async () => {
render(ChronikRow, { props: { item: makeItem({ youMentioned: true }) } });
const link = document.querySelector('a[data-variant]') as HTMLElement;
expect(link.getAttribute('data-variant')).toBe('for-you');
});
it('renders the rollup variant when count > 1', async () => {
render(ChronikRow, { props: { item: makeItem({ count: 3 }) } });
const link = document.querySelector('a[data-variant]') as HTMLElement;
expect(link.getAttribute('data-variant')).toBe('rollup');
const badge = document.querySelector('[data-testid="chronik-count-badge"]');
expect(badge).not.toBeNull();
});
it('renders the comment variant for COMMENT_ADDED kind', async () => {
render(ChronikRow, {
props: { item: makeItem({ kind: 'COMMENT_ADDED', commentPreview: 'Tolle Geschichte!' }) }
});
const link = document.querySelector('a[data-variant]') as HTMLElement;
expect(link.getAttribute('data-variant')).toBe('comment');
const preview = document.querySelector('[data-testid="chronik-comment-preview"]');
expect(preview?.textContent).toContain('Tolle Geschichte!');
});
it('falls back to ellipsis comment preview when commentPreview is null', async () => {
render(ChronikRow, { props: { item: makeItem({ kind: 'COMMENT_ADDED' }) } });
const preview = document.querySelector('[data-testid="chronik-comment-preview"]');
expect(preview?.textContent).toContain('…');
});
it('renders the document title in a styled span', async () => {
render(ChronikRow, { props: { item: makeItem() } });
const title = document.querySelector('[data-testid="chronik-doc-title"]');
expect(title?.textContent).toBe('Brief 1923');
});
it('uses /documents/{id} as default href', async () => {
render(ChronikRow, { props: { item: makeItem() } });
const link = document.querySelector('a[data-variant]') as HTMLAnchorElement;
expect(link.href).toContain('/documents/d1');
});
it('uses comment-deep-link href when commentId is set', async () => {
render(ChronikRow, {
props: { item: makeItem({ commentId: 'c1', kind: 'COMMENT_ADDED' }) }
});
const link = document.querySelector('a[data-variant]') as HTMLAnchorElement;
expect(link.href).toContain('c1');
});
it('renders a time-range label when rollup has happenedAtUntil', async () => {
render(ChronikRow, {
props: {
item: makeItem({
count: 5,
happenedAt: '2026-04-15T10:00:00Z',
happenedAtUntil: '2026-04-15T14:30:00Z'
})
}
});
// Time range uses U+2013 between two HH:MM strings — check for any colon-bearing time
expect(document.body.textContent).toMatch(/\d{2}:\d{2}/);
});
});


@@ -1,67 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import ChronikTimeline from './ChronikTimeline.svelte';
afterEach(cleanup);
const baseActor = { id: 'a1', name: 'Anna Schmidt', initials: 'AS', color: '#012851' };
const makeItem = (overrides: Record<string, unknown> = {}) => ({
id: 'i1',
kind: 'TEXT_SAVED' as string,
actor: baseActor,
documentId: 'd1',
documentTitle: 'Brief 1923',
count: 1,
happenedAt: new Date().toISOString(),
youMentioned: false,
...overrides
});
describe('ChronikTimeline', () => {
it('renders nothing when items is empty', async () => {
render(ChronikTimeline, { props: { items: [] } });
const buckets = document.querySelectorAll('[data-testid^="chronik-bucket-"]');
expect(buckets.length).toBe(0);
});
it('renders the today bucket for today items', async () => {
const today = new Date();
render(ChronikTimeline, {
props: { items: [makeItem({ id: 'i1', happenedAt: today.toISOString() })] }
});
const today_bucket = document.querySelector('[data-testid="chronik-bucket-today"]');
expect(today_bucket).not.toBeNull();
});
it('renders the older bucket for old items', async () => {
render(ChronikTimeline, {
props: { items: [makeItem({ id: 'i1', happenedAt: '2020-01-01T10:00:00Z' })] }
});
const olderBucket = document.querySelector('[data-testid="chronik-bucket-older"]');
expect(olderBucket).not.toBeNull();
});
it('renders multiple buckets when items span time ranges', async () => {
const today = new Date();
render(ChronikTimeline, {
props: {
items: [
makeItem({ id: 'i1', kind: 'TEXT_SAVED', happenedAt: today.toISOString() }),
makeItem({
id: 'i2',
kind: 'FILE_UPLOADED',
documentId: 'd2',
happenedAt: '2020-01-01T10:00:00Z'
})
]
}
});
const buckets = document.querySelectorAll('[data-testid^="chronik-bucket-"]');
expect(buckets.length).toBeGreaterThanOrEqual(2);
});
});


@@ -1,161 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DashboardActivityFeed from './DashboardActivityFeed.svelte';
import type { components } from '$lib/generated/api';
type ActivityFeedItemDTO = components['schemas']['ActivityFeedItemDTO'];
afterEach(cleanup);
const baseItem = (overrides: Partial<ActivityFeedItemDTO> = {}): ActivityFeedItemDTO =>
({
kind: 'TEXT_SAVED',
documentId: 'doc-1',
documentTitle: 'Brief 1899',
actor: {
id: 'u-1',
name: 'Anna Schmidt',
initials: 'AS',
color: '#336699'
},
count: 1,
happenedAt: '2026-04-14T14:02:00Z',
happenedAtUntil: null,
youMentioned: false,
...overrides
}) as ActivityFeedItemDTO;
describe('DashboardActivityFeed', () => {
it('renders the feed caption and show-all link', async () => {
render(DashboardActivityFeed, { props: { feed: [] } });
await expect.element(page.getByText('Kommentare & Aktivität')).toBeVisible();
const link = document.querySelector('a[href="/aktivitaeten"]');
expect(link).not.toBeNull();
});
it('renders nothing in the list when the feed is empty', async () => {
render(DashboardActivityFeed, { props: { feed: [] } });
const lists = document.querySelectorAll('ul');
expect(lists.length).toBe(0);
});
it('renders one row per feed item with the actor initials', async () => {
render(DashboardActivityFeed, {
props: {
feed: [baseItem(), baseItem({ documentId: 'doc-2', documentTitle: 'Brief 1900' })]
}
});
const items = document.querySelectorAll('li');
expect(items.length).toBe(2);
expect(document.body.textContent).toContain('AS');
});
it('renders the question-mark badge when no actor is set', async () => {
render(DashboardActivityFeed, {
props: { feed: [baseItem({ actor: null as unknown as undefined })] }
});
const li = document.querySelector('li');
expect(li?.textContent).toContain('?');
});
it('renders the rollup count badge when count > 1', async () => {
render(DashboardActivityFeed, {
props: { feed: [baseItem({ count: 5 })] }
});
const badge = document.querySelector('[data-testid="feed-rollup-count"]');
expect(badge?.textContent?.trim()).toBe('5');
});
it('omits the rollup count badge when count is 1', async () => {
render(DashboardActivityFeed, { props: { feed: [baseItem({ count: 1 })] } });
const badge = document.querySelector('[data-testid="feed-rollup-count"]');
expect(badge).toBeNull();
});
it('renders the "für dich" badge when youMentioned is true', async () => {
render(DashboardActivityFeed, {
props: { feed: [baseItem({ youMentioned: true })] }
});
await expect.element(page.getByText(/für dich/i)).toBeVisible();
});
it('maps the kind enum to a localized verb (TEXT_SAVED)', async () => {
render(DashboardActivityFeed, {
props: { feed: [baseItem({ kind: 'TEXT_SAVED' as ActivityFeedItemDTO['kind'] })] }
});
expect(document.body.textContent).toContain('hat Text gespeichert in');
});
it('maps the kind enum to a localized verb (FILE_UPLOADED)', async () => {
render(DashboardActivityFeed, {
props: { feed: [baseItem({ kind: 'FILE_UPLOADED' as ActivityFeedItemDTO['kind'] })] }
});
expect(document.body.textContent).toContain('hat eine Datei hochgeladen');
});
it('falls back to the raw kind when no verb is mapped', async () => {
render(DashboardActivityFeed, {
props: {
feed: [baseItem({ kind: 'UNKNOWN_KIND' as unknown as ActivityFeedItemDTO['kind'] })]
}
});
expect(document.body.textContent).toContain('UNKNOWN_KIND');
});
it('renders a rollup time range when happenedAtUntil is set and count > 1', async () => {
render(DashboardActivityFeed, {
props: {
feed: [
baseItem({
happenedAt: '2026-04-14T14:02:00Z',
happenedAtUntil: '2026-04-14T14:32:00Z',
count: 3
})
]
}
});
// "14:0214:32" appears (with the en-dash)
expect(document.body.textContent).toMatch(/\d{2}:\d{2}\d{2}:\d{2}/);
});
it('uses the actor initials as the fallback name when name is null', async () => {
render(DashboardActivityFeed, {
props: {
feed: [
baseItem({
actor: {
id: 'u-2',
name: null as unknown as undefined,
initials: 'XR',
color: '#000'
}
})
]
}
});
const strong = document.querySelector('strong');
expect(strong?.textContent).toBe('XR');
});
it('builds the document detail href from documentId', async () => {
render(DashboardActivityFeed, {
props: { feed: [baseItem({ documentId: 'doc-xyz', documentTitle: 'Brief 1901' })] }
});
const link = document.querySelector('a[href="/documents/doc-xyz"]');
expect(link).not.toBeNull();
});
});


@@ -1,207 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentMetadataDrawer from './DocumentMetadataDrawer.svelte';
afterEach(cleanup);
const sender = { id: 's1', firstName: 'Anna', lastName: 'Schmidt', displayName: 'Anna Schmidt' };
const receiver = (id: string, name: string) => ({
id,
firstName: name.split(' ')[0],
lastName: name.split(' ').slice(1).join(' ') || name,
displayName: name
});
const baseProps = {
documentDate: '1923-04-15' as string | null,
location: 'Berlin' as string | null,
status: 'UPLOADED',
sender: null as typeof sender | null,
receivers: [] as ReturnType<typeof receiver>[],
tags: [] as { id: string; name: string }[],
inferredRelationship: null,
geschichten: [] as {
id: string;
title: string;
publishedAt?: string;
author?: { firstName?: string; lastName?: string; email: string };
}[],
documentId: 'doc-1',
canBlogWrite: false
};
describe('DocumentMetadataDrawer', () => {
it('renders the three default section headings', async () => {
render(DocumentMetadataDrawer, { props: baseProps });
await expect.element(page.getByRole('heading', { name: 'Details' })).toBeVisible();
await expect.element(page.getByRole('heading', { name: 'Personen' })).toBeVisible();
await expect.element(page.getByRole('heading', { name: 'Schlagwörter' })).toBeVisible();
});
it('renders the formatted long date when documentDate is provided', async () => {
render(DocumentMetadataDrawer, { props: baseProps });
// formatDate default ('long') format is "15. April 1923" in de-DE.
await expect.element(page.getByText(/1923/)).toBeVisible();
});
it('renders an em-dash when documentDate is null', async () => {
render(DocumentMetadataDrawer, { props: { ...baseProps, documentDate: null } });
// The dash appears in date AND location AND geschichten — multiple matches expected
const dashes = document.querySelectorAll('dd, p');
const dashTexts = Array.from(dashes)
.map((el) => el.textContent?.trim())
.filter((t) => t === '—');
expect(dashTexts.length).toBeGreaterThan(0);
});
it('renders the no-persons placeholder when sender and receivers are empty', async () => {
render(DocumentMetadataDrawer, { props: baseProps });
await expect.element(page.getByText('Keine Personen zugeordnet')).toBeVisible();
});
it('renders the sender and inferred relationship label when both are present', async () => {
render(DocumentMetadataDrawer, {
props: {
...baseProps,
sender,
inferredRelationship: { labelFromA: 'Vater', labelFromB: 'Tochter' }
}
});
await expect.element(page.getByText('Anna Schmidt')).toBeVisible();
});
it('renders the receivers list with up to five visible by default', async () => {
const receivers = Array.from({ length: 7 }, (_, i) => receiver(`r${i}`, `Person ${i}`));
render(DocumentMetadataDrawer, {
props: { ...baseProps, sender, receivers }
});
await expect.element(page.getByText('Person 0')).toBeVisible();
await expect.element(page.getByText('Person 4')).toBeVisible();
await expect.element(page.getByText('Person 5')).not.toBeInTheDocument();
});
it('renders the +N more button when there are more than five receivers', async () => {
const receivers = Array.from({ length: 8 }, (_, i) => receiver(`r${i}`, `Person ${i}`));
render(DocumentMetadataDrawer, {
props: { ...baseProps, sender, receivers }
});
await expect.element(page.getByRole('button', { name: /\+3 weitere/i })).toBeVisible();
});
it('expands the receiver list when the +N more button is clicked', async () => {
const receivers = Array.from({ length: 8 }, (_, i) => receiver(`r${i}`, `Person ${i}`));
render(DocumentMetadataDrawer, {
props: { ...baseProps, sender, receivers }
});
await page.getByRole('button', { name: /\+3 weitere/i }).click();
await expect.element(page.getByText('Person 7')).toBeVisible();
});
it('renders the no-tags placeholder when tags is empty', async () => {
render(DocumentMetadataDrawer, { props: baseProps });
await expect.element(page.getByText('Keine Schlagwörter zugeordnet')).toBeVisible();
});
it('renders one anchor per tag when tags are present', async () => {
render(DocumentMetadataDrawer, {
props: {
...baseProps,
tags: [
{ id: 't1', name: 'Familie' },
{ id: 't2', name: 'Reise' }
]
}
});
await expect
.element(page.getByRole('link', { name: 'Familie' }))
.toHaveAttribute('href', '/?tag=Familie');
await expect
.element(page.getByRole('link', { name: 'Reise' }))
.toHaveAttribute('href', '/?tag=Reise');
});
it('hides the geschichten column when there are no stories and no canBlogWrite', async () => {
render(DocumentMetadataDrawer, { props: baseProps });
await expect
.element(page.getByRole('heading', { name: 'Geschichten' }))
.not.toBeInTheDocument();
});
it('shows the geschichten column when canBlogWrite is true even with no stories', async () => {
render(DocumentMetadataDrawer, { props: { ...baseProps, canBlogWrite: true } });
await expect.element(page.getByRole('heading', { name: 'Geschichten' })).toBeVisible();
});
it('renders the attach link to the new-geschichte route when canBlogWrite + documentId', async () => {
render(DocumentMetadataDrawer, {
props: { ...baseProps, canBlogWrite: true, documentId: 'doc-42' }
});
const links = document.querySelectorAll('a[href*="/geschichten/new?documentId="]');
expect(links.length).toBe(1);
expect((links[0] as HTMLAnchorElement).href).toContain('documentId=doc-42');
});
it('renders the geschichten list when stories are present', async () => {
render(DocumentMetadataDrawer, {
props: {
...baseProps,
geschichten: [
{
id: 'g1',
title: 'Reise nach Berlin',
publishedAt: '2026-04-15T10:00:00Z',
author: { firstName: 'Anna', lastName: 'Schmidt', email: 'anna@x' }
}
]
}
});
await expect.element(page.getByRole('link', { name: /reise nach berlin/i })).toBeVisible();
});
it('renders the show-all geschichten link when there are at least three stories', async () => {
render(DocumentMetadataDrawer, {
props: {
...baseProps,
geschichten: Array.from({ length: 3 }, (_, i) => ({
id: `g${i}`,
title: `Geschichte ${i}`,
publishedAt: '2026-04-15T10:00:00Z',
author: { firstName: 'Anna', lastName: 'Schmidt', email: 'anna@x' }
}))
}
});
await expect.element(page.getByText(/zeige alle|alle/i)).toBeVisible();
});
it('renders the receiver-only inferred relationship pill only when there is exactly one receiver', async () => {
render(DocumentMetadataDrawer, {
props: {
...baseProps,
sender,
receivers: [receiver('r1', 'Bert Meier')],
inferredRelationship: { labelFromA: 'Vater', labelFromB: 'Tochter' }
}
});
// Both labels should be visible — Vater for sender, Tochter for the single receiver
await expect.element(page.getByText(/vater/i)).toBeVisible();
await expect.element(page.getByText(/tochter/i)).toBeVisible();
});
});


@@ -1,96 +0,0 @@
<script lang="ts">
import { m } from '$lib/paraglide/messages.js';
import { clickOutside } from '$lib/shared/actions/clickOutside';
type Props = {
canWrite: boolean;
isPdf: boolean;
transcribeMode: boolean;
filePath?: string | null;
originalFilename?: string | null;
fileUrl: string;
};
let {
canWrite,
isPdf,
transcribeMode = $bindable(),
filePath = null,
originalFilename = null,
fileUrl
}: Props = $props();
let mobileMenuOpen = $state(false);
function startTranscribe() {
transcribeMode = true;
mobileMenuOpen = false;
}
</script>
<div role="group" class="relative" use:clickOutside onclickoutside={() => (mobileMenuOpen = false)}>
<button
type="button"
onclick={() => (mobileMenuOpen = !mobileMenuOpen)}
aria-label={m.topbar_more_actions()}
aria-haspopup="true"
aria-expanded={mobileMenuOpen}
class="flex h-9 w-9 items-center justify-center rounded border border-line bg-muted transition hover:bg-accent focus-visible:ring-2 focus-visible:ring-primary"
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/View-More-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5"
/>
</button>
{#if mobileMenuOpen}
<div
role="menu"
class="absolute top-full right-0 z-50 mt-1 min-w-[200px] rounded-md border border-line bg-surface p-2 shadow-lg"
>
{#if canWrite && isPdf && !transcribeMode}
<button
onclick={startTranscribe}
aria-label={m.transcription_mode_label()}
aria-pressed={false}
class="flex w-full items-center gap-2 rounded px-3 py-2 text-left text-[16px] text-ink transition hover:bg-muted focus-visible:ring-2 focus-visible:ring-primary"
>
<svg
class="h-5 w-5 shrink-0"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
stroke-width="1.5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M19.5 14.25v-2.625a3.375 3.375 0 00-3.375-3.375h-1.5A1.125 1.125 0 0113.5 7.125v-1.5a3.375 3.375 0 00-3.375-3.375H8.25m0 12.75h7.5m-7.5 3H12M10.5 2.25H5.625c-.621 0-1.125.504-1.125 1.125v17.25c0 .621.504 1.125 1.125 1.125h12.75c.621 0 1.125-.504 1.125-1.125V11.25a9 9 0 00-9-9z"
/>
</svg>
{m.transcription_mode_label()}
</button>
{/if}
{#if filePath}
<a
href={fileUrl}
download={originalFilename}
onclick={() => (mobileMenuOpen = false)}
class="flex items-center gap-2 rounded px-3 py-2 text-[16px] text-ink transition hover:bg-muted focus-visible:ring-2 focus-visible:ring-primary"
title={m.doc_download_title()}
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/Download-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5 shrink-0"
/>
{m.doc_download_title()}
</a>
{/if}
</div>
{/if}
</div>
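The menu's dismissal relies on the clickOutside action dispatching a clickoutside CustomEvent on the node; a minimal sketch of such an action, assuming the conventional Svelte pattern (the project's actual $lib/shared/actions/clickOutside is not shown in this diff):
import type { Action } from 'svelte/action';
// Dispatches 'clickoutside' on the node when a pointerdown lands outside it,
// which Svelte 5 delivers to the component's onclickoutside attribute.
export const clickOutside: Action<HTMLElement> = (node) => {
	const onPointerDown = (event: PointerEvent) => {
		if (!node.contains(event.target as Node)) {
			node.dispatchEvent(new CustomEvent('clickoutside'));
		}
	};
	document.addEventListener('pointerdown', onPointerDown, true);
	return {
		destroy() {
			document.removeEventListener('pointerdown', onPointerDown, true);
		}
	};
};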


@@ -1,91 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentMobileMenu from './DocumentMobileMenu.svelte';
afterEach(cleanup);
const baseProps = {
canWrite: false,
isPdf: false,
transcribeMode: false,
filePath: null as string | null,
originalFilename: 'brief.pdf' as string | null,
fileUrl: ''
};
describe('DocumentMobileMenu', () => {
it('renders the kebab trigger button with the more-actions aria-label', async () => {
render(DocumentMobileMenu, { props: { ...baseProps, filePath: 'docs/x.pdf' } });
await expect.element(page.getByRole('button', { name: /weitere aktionen/i })).toBeVisible();
});
it('starts with the dropdown closed (aria-expanded=false)', async () => {
render(DocumentMobileMenu, { props: { ...baseProps, filePath: 'docs/x.pdf' } });
await expect
.element(page.getByRole('button', { name: /weitere aktionen/i }))
.toHaveAttribute('aria-expanded', 'false');
});
it('opens the dropdown when the trigger is clicked', async () => {
render(DocumentMobileMenu, { props: { ...baseProps, filePath: 'docs/x.pdf' } });
await page.getByRole('button', { name: /weitere aktionen/i }).click();
await expect
.element(page.getByRole('button', { name: /weitere aktionen/i }))
.toHaveAttribute('aria-expanded', 'true');
});
it('shows the transcribe action inside the open menu when canWrite, isPdf, and not in transcribe mode', async () => {
render(DocumentMobileMenu, {
props: { ...baseProps, canWrite: true, isPdf: true, filePath: 'docs/x.pdf' }
});
await page.getByRole('button', { name: /weitere aktionen/i }).click();
await expect.element(page.getByRole('button', { name: /transkribieren/i })).toBeVisible();
});
it('hides the transcribe action when already in transcribeMode', async () => {
render(DocumentMobileMenu, {
props: {
...baseProps,
canWrite: true,
isPdf: true,
transcribeMode: true,
filePath: 'docs/x.pdf'
}
});
await page.getByRole('button', { name: /weitere aktionen/i }).click();
await expect
.element(page.getByRole('button', { name: /transkribieren/i }))
.not.toBeInTheDocument();
});
it('shows the download link inside the open menu when filePath is present', async () => {
render(DocumentMobileMenu, {
props: { ...baseProps, filePath: 'docs/x.pdf', fileUrl: '/api/docs/x' }
});
await page.getByRole('button', { name: /weitere aktionen/i }).click();
await expect.element(page.getByRole('link', { name: /herunterladen/i })).toBeVisible();
});
it('omits the download link when filePath is null', async () => {
render(DocumentMobileMenu, {
props: { ...baseProps, canWrite: true, isPdf: true }
});
await page.getByRole('button', { name: /weitere aktionen/i }).click();
await expect
.element(page.getByRole('link', { name: /herunterladen/i }))
.not.toBeInTheDocument();
});
});


@@ -1,150 +0,0 @@
import { describe, it, expect, vi, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
vi.mock('$app/navigation', () => ({
beforeNavigate: () => {},
afterNavigate: () => {},
goto: vi.fn(),
invalidate: vi.fn(),
invalidateAll: vi.fn(),
preloadCode: vi.fn(),
preloadData: vi.fn(),
pushState: vi.fn(),
replaceState: vi.fn(),
disableScrollHandling: vi.fn(),
onNavigate: () => () => {}
}));
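// Note the shape here: the vi.mock factory above is synchronous, and the
// dynamic import below sits at module top level, outside any factory body.
// That is what keeps this spec clear of the banned
// async-factory-with-await-import pattern (ADR-012 / #553).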
const { default: DocumentRow } = await import('./DocumentRow.svelte');
afterEach(cleanup);
const sender = { id: 's1', displayName: 'Anna Schmidt' };
const receiver = { id: 'r1', displayName: 'Bert Meier' };
const makeDoc = (overrides: Record<string, unknown> = {}) => ({
id: 'd1',
title: 'Brief 1923',
originalFilename: 'b.pdf',
documentDate: '1923-04-15',
sender,
receivers: [receiver],
tags: [],
thumbnailUrl: null,
contentType: 'application/pdf',
summary: null,
archiveBox: null,
archiveFolder: null,
location: null,
...overrides
});
const baseItem = (docOverrides: Record<string, unknown> = {}) => ({
document: makeDoc(docOverrides),
matchData: null,
completionPercentage: 0,
contributors: []
});
describe('DocumentRow', () => {
it('renders the title', async () => {
render(DocumentRow, { props: { item: baseItem() } });
await expect
.element(page.getByRole('heading', { level: 3, name: /brief 1923/i }))
.toBeVisible();
});
it('falls back to originalFilename when title is null', async () => {
render(DocumentRow, { props: { item: baseItem({ title: null }) } });
await expect.element(page.getByRole('heading', { level: 3, name: /b\.pdf/i })).toBeVisible();
});
it('renders the sender name in the metadata column', async () => {
render(DocumentRow, { props: { item: baseItem() } });
await expect.element(page.getByText('Anna Schmidt')).toBeVisible();
});
it('renders the unknown placeholder when sender is null', async () => {
render(DocumentRow, { props: { item: baseItem({ sender: null }) } });
const unknownTexts = document.querySelectorAll('.italic');
const hasUnknown = Array.from(unknownTexts).some((el) => el.textContent?.includes('Unbekannt'));
expect(hasUnknown).toBe(true);
});
it('renders one tag button per document tag', async () => {
render(DocumentRow, {
props: {
item: baseItem({
tags: [
{ id: 't1', name: 'Familie', color: null },
{ id: 't2', name: 'Reise', color: '#ffaabb' }
]
})
}
});
await expect.element(page.getByRole('button', { name: 'Familie' })).toBeVisible();
await expect.element(page.getByRole('button', { name: 'Reise' })).toBeVisible();
});
it('renders the bulk-select checkbox when canWrite is true', async () => {
render(DocumentRow, { props: { item: baseItem(), canWrite: true } });
const checkbox = document.querySelector('input[type="checkbox"]');
expect(checkbox).not.toBeNull();
});
it('hides the bulk-select checkbox when canWrite is false', async () => {
render(DocumentRow, { props: { item: baseItem(), canWrite: false } });
const checkbox = document.querySelector('input[type="checkbox"]');
expect(checkbox).toBeNull();
});
it('renders archive chips when archive metadata is present', async () => {
render(DocumentRow, {
props: {
item: baseItem({ archiveBox: 'Box 1', archiveFolder: 'Mappe A', location: 'Berlin' })
}
});
await expect.element(page.getByText('Box 1')).toBeVisible();
await expect.element(page.getByText('Mappe A')).toBeVisible();
await expect.element(page.getByText('Berlin')).toBeVisible();
});
it('renders the snippet when matchData provides a transcriptionSnippet', async () => {
render(DocumentRow, {
props: {
item: {
document: makeDoc(),
matchData: { transcriptionSnippet: 'Hello world snippet' },
completionPercentage: 50,
contributors: []
}
}
});
await expect.element(page.getByTestId('search-snippet')).toBeVisible();
});
it('renders the summary when present', async () => {
render(DocumentRow, {
props: { item: baseItem({ summary: 'Brief über die Reise nach Berlin' }) }
});
await expect.element(page.getByTestId('doc-summary')).toBeVisible();
});
it('renders an em-dash for missing documentDate', async () => {
render(DocumentRow, { props: { item: baseItem({ documentDate: null }) } });
// Multiple em-dashes possible; just ensure at least one is rendered
expect(document.body.textContent).toContain('—');
});
});


@@ -1,50 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentStatusChip from './DocumentStatusChip.svelte';
afterEach(cleanup);
describe('DocumentStatusChip', () => {
it('renders the placeholder label and gray dot for PLACEHOLDER status', async () => {
render(DocumentStatusChip, { props: { status: 'PLACEHOLDER' } });
const dot = await page.getByTitle('Platzhalter').element();
expect(dot.classList.contains('bg-gray-400')).toBe(true);
});
it('renders the uploaded label and emerald dot for UPLOADED status', async () => {
render(DocumentStatusChip, { props: { status: 'UPLOADED' } });
const dot = await page.getByTitle('Hochgeladen').element();
expect(dot.classList.contains('bg-emerald-500')).toBe(true);
});
it('renders the transcribed label and blue dot for TRANSCRIBED status', async () => {
render(DocumentStatusChip, { props: { status: 'TRANSCRIBED' } });
const dot = await page.getByTitle('Transkribiert').element();
expect(dot.classList.contains('bg-blue-400')).toBe(true);
});
it('renders the reviewed label and amber dot for REVIEWED status', async () => {
render(DocumentStatusChip, { props: { status: 'REVIEWED' } });
const dot = await page.getByTitle('Geprüft').element();
expect(dot.classList.contains('bg-amber-400')).toBe(true);
});
it('renders the archived label and dark emerald dot for ARCHIVED status', async () => {
render(DocumentStatusChip, { props: { status: 'ARCHIVED' } });
const dot = await page.getByTitle('Archiviert').element();
expect(dot.classList.contains('bg-emerald-600')).toBe(true);
});
it('exposes the status as both a title tooltip and an aria-label', async () => {
render(DocumentStatusChip, { props: { status: 'UPLOADED' } });
const dot = await page.getByTitle('Hochgeladen').element();
expect(dot.getAttribute('aria-label')).toBe('Hochgeladen');
});
});


@@ -1,61 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import DocumentThumbnail from './DocumentThumbnail.svelte';
afterEach(cleanup);
describe('DocumentThumbnail', () => {
it('renders the supplied thumbnail image when thumbnailUrl is set', async () => {
render(DocumentThumbnail, {
props: {
doc: { id: 'd1', thumbnailUrl: '/api/d1/thumb', contentType: 'application/pdf' }
}
});
const img = document.querySelector('img') as HTMLImageElement;
expect(img).not.toBeNull();
expect(img.src).toContain('/api/d1/thumb');
});
it('renders the placeholder icon when thumbnailUrl is missing', async () => {
render(DocumentThumbnail, {
props: { doc: { id: 'd1', thumbnailUrl: null, contentType: 'application/pdf' } }
});
const svg = document.querySelector('svg');
expect(svg).not.toBeNull();
});
it('uses the small container size by default', async () => {
render(DocumentThumbnail, {
props: { doc: { id: 'd1', thumbnailUrl: null, contentType: 'application/pdf' } }
});
const container = document.querySelector('.h-\\[84px\\]');
expect(container).not.toBeNull();
});
it('uses the large container size when size="lg"', async () => {
render(DocumentThumbnail, {
props: {
doc: { id: 'd1', thumbnailUrl: null, contentType: 'application/pdf' },
size: 'lg'
}
});
const container = document.querySelector('.h-\\[168px\\]');
expect(container).not.toBeNull();
});
it('uses lazy loading attributes on the thumbnail image', async () => {
render(DocumentThumbnail, {
props: {
doc: { id: 'd1', thumbnailUrl: '/api/d1/thumb', contentType: 'application/pdf' }
}
});
const img = document.querySelector('img') as HTMLImageElement;
expect(img.loading).toBe('lazy');
expect(img.decoding).toBe('async');
});
});


@@ -1,12 +1,11 @@
<script lang="ts">
import { m } from '$lib/paraglide/messages.js';
import { slide } from 'svelte/transition';
import { formatDate } from '$lib/shared/utils/date';
import { clickOutside } from '$lib/shared/actions/clickOutside';
import PersonChipRow from '$lib/person/PersonChipRow.svelte';
import OverflowPillButton from '$lib/shared/primitives/OverflowPillButton.svelte';
import DocumentMetadataDrawer from './DocumentMetadataDrawer.svelte';
import DocumentTopBarTitle from './DocumentTopBarTitle.svelte';
import DocumentTopBarActions from './DocumentTopBarActions.svelte';
import DocumentMobileMenu from './DocumentMobileMenu.svelte';
import BackButton from '$lib/shared/primitives/BackButton.svelte';
type Person = { id: string; firstName?: string | null; lastName: string; displayName: string };
@@ -59,8 +58,93 @@ const isPdf = $derived(!!doc.filePath && doc.contentType?.startsWith('applicatio
const receivers = $derived(doc.receivers ?? []);
const extraCount = $derived(Math.max(0, receivers.length - 2));
const overflowPersons = $derived(receivers.slice(2));
const shortDate = $derived(doc.documentDate ? formatDate(doc.documentDate, 'short') : null);
const longDate = $derived(doc.documentDate ? formatDate(doc.documentDate, 'long') : null);
let mobileMenuOpen = $state(false);
</script>
{#snippet transcribeBtn(mobile: boolean)}
<button
onclick={() => {
transcribeMode = true;
if (mobile) mobileMenuOpen = false;
}}
aria-label={m.transcription_mode_label()}
aria-pressed={false}
class={mobile
? 'flex w-full items-center gap-2 rounded px-3 py-2 text-left text-[16px] text-ink transition hover:bg-muted focus-visible:ring-2 focus-visible:ring-primary'
: 'hidden items-center gap-1.5 rounded border border-primary px-3 py-1.5 font-sans text-[16px] font-medium text-ink transition hover:bg-primary hover:text-primary-fg focus-visible:ring-2 focus-visible:ring-primary md:flex'}
>
<svg
class="h-5 w-5 shrink-0"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
stroke-width="1.5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M19.5 14.25v-2.625a3.375 3.375 0 00-3.375-3.375h-1.5A1.125 1.125 0 0113.5 7.125v-1.5a3.375 3.375 0 00-3.375-3.375H8.25m0 12.75h7.5m-7.5 3H12M10.5 2.25H5.625c-.621 0-1.125.504-1.125 1.125v17.25c0 .621.504 1.125 1.125 1.125h12.75c.621 0 1.125-.504 1.125-1.125V11.25a9 9 0 00-9-9z"
/>
</svg>
{m.transcription_mode_label()}
</button>
{/snippet}
{#snippet transcribeStopBtn(mobile: boolean)}
<button
onclick={() => {
transcribeMode = false;
if (mobile) mobileMenuOpen = false;
}}
aria-label={m.transcription_mode_stop()}
aria-pressed={true}
class={mobile
? 'flex w-full items-center gap-2 rounded bg-primary px-3 py-2 text-left text-[16px] text-primary-fg transition focus-visible:ring-2 focus-visible:ring-primary'
: 'flex items-center gap-1.5 rounded bg-primary px-3 py-1.5 font-sans text-[16px] font-medium text-primary-fg transition focus-visible:ring-2 focus-visible:ring-primary'}
>
<svg
class="h-5 w-5 shrink-0"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
stroke-width="1.5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M19.5 14.25v-2.625a3.375 3.375 0 00-3.375-3.375h-1.5A1.125 1.125 0 0113.5 7.125v-1.5a3.375 3.375 0 00-3.375-3.375H8.25m0 12.75h7.5m-7.5 3H12M10.5 2.25H5.625c-.621 0-1.125.504-1.125 1.125v17.25c0 .621.504 1.125 1.125 1.125h12.75c.621 0 1.125-.504 1.125-1.125V11.25a9 9 0 00-9-9z"
/>
</svg>
{m.transcription_mode_stop()}
</button>
{/snippet}
{#snippet downloadLink(mobile: boolean)}
<a
href={fileUrl}
download={doc.originalFilename}
onclick={() => {
if (mobile) mobileMenuOpen = false;
}}
class={mobile
? 'flex items-center gap-2 rounded px-3 py-2 text-[16px] text-ink transition hover:bg-muted focus-visible:ring-2 focus-visible:ring-primary'
: 'hidden rounded border border-transparent bg-muted p-1.5 text-ink transition hover:bg-accent focus-visible:ring-2 focus-visible:ring-primary md:block'}
title={m.doc_download_title()}
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/Download-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5 shrink-0"
/>
{#if mobile}{m.doc_download_title()}{/if}
</a>
{/snippet}
<div data-topbar class="relative z-10 border-b border-line bg-surface shadow-sm">
<!-- Main row -->
<div class="flex h-[75px] shrink-0 items-center pr-4 xs:h-[88px]">
@@ -77,11 +161,20 @@ const overflowPersons = $derived(receivers.slice(2));
<div class="mx-2 h-6 w-px shrink-0 bg-line"></div>
<!-- Title + meta -->
<DocumentTopBarTitle
title={doc.title}
originalFilename={doc.originalFilename}
documentDate={doc.documentDate}
/>
<div class="min-w-0 flex-1 overflow-hidden">
<h1
class="truncate font-serif text-[18px] leading-tight text-ink lg:text-[20px]"
title={doc.title ?? doc.originalFilename ?? ''}
>
{doc.title || doc.originalFilename}
</h1>
{#if shortDate}
<p class="font-sans text-[16px] text-ink-2">
<span class="lg:hidden">{shortDate}</span>
<span class="hidden lg:inline">{longDate}</span>
</p>
{/if}
</div>
<!-- Chip row — desktop only, hidden on small screens to make room for buttons -->
<div class="mx-3 hidden min-w-0 shrink-0 md:block">
@@ -99,9 +192,7 @@ const overflowPersons = $derived(receivers.slice(2));
onclick={() => (detailsOpen = !detailsOpen)}
aria-expanded={detailsOpen}
aria-label={m.doc_details_toggle()}
class="ml-2 inline-flex min-h-[44px] shrink-0 items-center gap-1.5 rounded border px-3 py-1 font-sans text-sm font-semibold transition-colors {detailsOpen
? 'border-primary bg-primary text-primary-fg'
: 'border-line text-ink-2 hover:bg-muted hover:text-ink'}"
class="ml-2 inline-flex min-h-[44px] shrink-0 items-center gap-1.5 rounded border px-3 py-1 font-sans text-sm font-semibold transition-colors {detailsOpen ? 'border-primary bg-primary text-primary-fg' : 'border-line text-ink-2 hover:bg-muted hover:text-ink'}"
>
{m.doc_details_toggle()}
<svg
@@ -121,26 +212,72 @@ const overflowPersons = $derived(receivers.slice(2));
<!-- Action buttons -->
<div class="flex shrink-0 items-center gap-1.5 font-sans">
<DocumentTopBarActions
documentId={doc.id}
canWrite={canWrite}
isPdf={!!isPdf}
bind:transcribeMode={transcribeMode}
filePath={doc.filePath}
originalFilename={doc.originalFilename}
fileUrl={fileUrl}
/>
{#if canWrite && isPdf && !transcribeMode}
{@render transcribeBtn(false)}
{/if}
{#if (canWrite && isPdf) || doc.filePath}
<div class="md:hidden">
<DocumentMobileMenu
canWrite={canWrite}
isPdf={!!isPdf}
bind:transcribeMode={transcribeMode}
filePath={doc.filePath}
originalFilename={doc.originalFilename}
fileUrl={fileUrl}
/>
</div>
{/if}
{#if transcribeMode}
{@render transcribeStopBtn(false)}
{/if}
{#if canWrite && !transcribeMode}
<a
href="/documents/{doc.id}/edit"
aria-label={m.btn_edit()}
class="flex items-center gap-1.5 rounded border border-primary bg-transparent px-3 py-1.5 text-[16px] font-medium text-ink transition hover:bg-primary hover:text-primary-fg focus-visible:ring-2 focus-visible:ring-primary"
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/Edit-Content-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5"
/>
<span class="hidden sm:inline">{m.btn_edit()}</span>
</a>
{/if}
{#if doc.filePath && !transcribeMode}
{@render downloadLink(false)}
{/if}
<!-- Kebab menu — mobile only, contains actions hidden below md -->
{#if (canWrite && isPdf) || doc.filePath}
<div
role="group"
class="relative md:hidden"
use:clickOutside
onclickoutside={() => (mobileMenuOpen = false)}
>
<button
type="button"
onclick={() => (mobileMenuOpen = !mobileMenuOpen)}
aria-label={m.topbar_more_actions()}
aria-haspopup="true"
aria-expanded={mobileMenuOpen}
class="flex h-9 w-9 items-center justify-center rounded border border-line bg-muted transition hover:bg-accent focus-visible:ring-2 focus-visible:ring-primary"
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/View-More-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5"
/>
</button>
{#if mobileMenuOpen}
<div
role="menu"
class="absolute top-full right-0 z-50 mt-1 min-w-[200px] rounded-md border border-line bg-surface p-2 shadow-lg"
>
{#if canWrite && isPdf && !transcribeMode}
{@render transcribeBtn(true)}
{/if}
{#if doc.filePath}
{@render downloadLink(true)}
{/if}
</div>
{/if}
</div>
{/if}
</div>

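Aside: the use:clickOutside / onclickoutside pair in the kebab-menu markup above implies a Svelte action that dispatches a custom event when a click lands outside its node. The action itself is not part of this diff; what follows is a minimal sketch of one that would satisfy that markup — the module path and details are assumptions, not this repo's actual implementation.

// $lib/actions/clickOutside.ts — hypothetical sketch for illustration only.
import type { Action } from 'svelte/action';

export const clickOutside: Action<HTMLElement> = (node) => {
	const handle = (event: MouseEvent) => {
		// A click on or inside the node is not "outside".
		if (event.target instanceof Node && node.contains(event.target)) return;
		node.dispatchEvent(new CustomEvent('clickoutside'));
	};
	// Capture phase, so an inner handler calling stopPropagation() cannot swallow it.
	document.addEventListener('click', handle, true);
	return {
		destroy() {
			document.removeEventListener('click', handle, true);
		}
	};
};

Svelte 5 treats any on*-prefixed attribute on an element as a plain event listener, so the dispatched CustomEvent reaches the onclickoutside handler without a dispatcher.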

@@ -1,193 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentTopBar from './DocumentTopBar.svelte';
afterEach(cleanup);
const sender = { id: 's1', firstName: 'Anna', lastName: 'Schmidt', displayName: 'Anna Schmidt' };
const receiver = { id: 'r1', firstName: 'Bert', lastName: 'Meier', displayName: 'Bert Meier' };
const baseDoc = {
id: 'd1',
title: 'Brief an Helene',
originalFilename: 'brief.pdf',
documentDate: '1923-04-15',
sender,
receivers: [receiver],
filePath: null as string | null,
contentType: null as string | null,
location: null,
status: 'UPLOADED',
tags: [] as { id: string; name: string }[]
};
const baseProps = (overrides: Record<string, unknown> = {}) => ({
doc: baseDoc,
canWrite: false,
fileUrl: '',
transcribeMode: false,
inferredRelationship: null,
geschichten: [],
canBlogWrite: false,
...overrides
});
describe('DocumentTopBar', () => {
it('renders the document title as the main heading', async () => {
render(DocumentTopBar, { props: baseProps() });
await expect.element(page.getByRole('heading', { name: 'Brief an Helene' })).toBeVisible();
});
it('falls back to originalFilename when title is missing', async () => {
render(DocumentTopBar, { props: baseProps({ doc: { ...baseDoc, title: null } }) });
await expect.element(page.getByRole('heading', { name: 'brief.pdf' })).toBeVisible();
});
it('renders the short documentDate when one is present', async () => {
render(DocumentTopBar, { props: baseProps() });
await expect.element(page.getByText('15.04.1923')).toBeVisible();
});
it('omits the date paragraph entirely when documentDate is null', async () => {
render(DocumentTopBar, { props: baseProps({ doc: { ...baseDoc, documentDate: null } }) });
await expect.element(page.getByText(/^\d{2}\.\d{2}\.\d{4}$/)).not.toBeInTheDocument();
});
it('does not render the transcribe button when canWrite is false', async () => {
render(DocumentTopBar, {
props: baseProps({ doc: { ...baseDoc, filePath: 'x', contentType: 'application/pdf' } })
});
await expect
.element(page.getByRole('button', { name: /transkribieren/i }))
.not.toBeInTheDocument();
});
it('does not render the transcribe button when contentType is not PDF', async () => {
render(DocumentTopBar, {
props: baseProps({
canWrite: true,
doc: { ...baseDoc, filePath: 'x', contentType: 'image/jpeg' }
})
});
await expect
.element(page.getByRole('button', { name: /transkribieren/i }))
.not.toBeInTheDocument();
});
it('renders the transcribe button when canWrite is true and the file is a PDF', async () => {
render(DocumentTopBar, {
props: baseProps({
canWrite: true,
doc: { ...baseDoc, filePath: 'x', contentType: 'application/pdf' }
})
});
await expect.element(page.getByRole('button', { name: /transkribieren/i })).toBeVisible();
});
it('renders the stop-transcribe button when transcribeMode is true', async () => {
render(DocumentTopBar, {
props: baseProps({
canWrite: true,
transcribeMode: true,
doc: { ...baseDoc, filePath: 'x', contentType: 'application/pdf' }
})
});
await expect.element(page.getByRole('button', { name: /fertig/i })).toBeVisible();
});
it('hides the edit link when transcribeMode is true', async () => {
render(DocumentTopBar, {
props: baseProps({
canWrite: true,
transcribeMode: true,
doc: { ...baseDoc, filePath: 'x', contentType: 'application/pdf' }
})
});
await expect.element(page.getByRole('link', { name: /bearbeiten/i })).not.toBeInTheDocument();
});
it('renders the edit link when canWrite is true and not in transcribeMode', async () => {
render(DocumentTopBar, { props: baseProps({ canWrite: true }) });
await expect
.element(page.getByRole('link', { name: /bearbeiten/i }))
.toHaveAttribute('href', '/documents/d1/edit');
});
it('does not render the edit link when canWrite is false', async () => {
render(DocumentTopBar, { props: baseProps() });
await expect.element(page.getByRole('link', { name: /bearbeiten/i })).not.toBeInTheDocument();
});
it('renders the download link when filePath is present and not in transcribe mode', async () => {
render(DocumentTopBar, {
props: baseProps({ doc: { ...baseDoc, filePath: 'docs/x.pdf' }, fileUrl: '/api/docs/x' })
});
await expect.element(page.getByTitle('Herunterladen')).toBeVisible();
});
it('does not render the download link when filePath is null', async () => {
render(DocumentTopBar, { props: baseProps() });
await expect.element(page.getByTitle('Herunterladen')).not.toBeInTheDocument();
});
it('opens the metadata drawer when the details toggle is clicked', async () => {
render(DocumentTopBar, { props: baseProps() });
await page.getByRole('button', { name: /^details$/i }).click();
await expect
.element(page.getByRole('button', { name: /^details$/i }))
.toHaveAttribute('aria-expanded', 'true');
});
it('renders the mobile kebab menu trigger when filePath is present', async () => {
render(DocumentTopBar, {
props: baseProps({ doc: { ...baseDoc, filePath: 'docs/x.pdf' } })
});
await expect.element(page.getByRole('button', { name: /weitere aktionen/i })).toBeVisible();
});
it('does not render the mobile kebab menu when there is no filePath and no canWrite/PDF combo', async () => {
render(DocumentTopBar, { props: baseProps() });
await expect
.element(page.getByRole('button', { name: /weitere aktionen/i }))
.not.toBeInTheDocument();
});
it('opens the mobile kebab menu when the trigger is clicked', async () => {
render(DocumentTopBar, {
props: baseProps({ doc: { ...baseDoc, filePath: 'docs/x.pdf' } })
});
await page.getByRole('button', { name: /weitere aktionen/i }).click();
await expect
.element(page.getByRole('button', { name: /weitere aktionen/i }))
.toHaveAttribute('aria-expanded', 'true');
});
it('renders the metadata drawer content when detailsOpen is toggled on', async () => {
render(DocumentTopBar, { props: baseProps() });
await page.getByRole('button', { name: /^details$/i }).click();
const drawer = document.querySelector('[data-topbar] > div:nth-child(2)');
expect(drawer).not.toBeNull();
});
});


@@ -1,103 +0,0 @@
<script lang="ts">
import { m } from '$lib/paraglide/messages.js';
type Props = {
documentId: string;
canWrite: boolean;
isPdf: boolean;
transcribeMode: boolean;
filePath?: string | null;
originalFilename?: string | null;
fileUrl: string;
};
let {
documentId,
canWrite,
isPdf,
transcribeMode = $bindable(),
filePath = null,
originalFilename = null,
fileUrl
}: Props = $props();
</script>
{#if canWrite && isPdf && !transcribeMode}
<button
onclick={() => (transcribeMode = true)}
aria-label={m.transcription_mode_label()}
aria-pressed={false}
class="hidden items-center gap-1.5 rounded border border-primary px-3 py-1.5 font-sans text-[16px] font-medium text-ink transition hover:bg-primary hover:text-primary-fg focus-visible:ring-2 focus-visible:ring-primary md:flex"
>
<svg
class="h-5 w-5 shrink-0"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
stroke-width="1.5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M19.5 14.25v-2.625a3.375 3.375 0 00-3.375-3.375h-1.5A1.125 1.125 0 0113.5 7.125v-1.5a3.375 3.375 0 00-3.375-3.375H8.25m0 12.75h7.5m-7.5 3H12M10.5 2.25H5.625c-.621 0-1.125.504-1.125 1.125v17.25c0 .621.504 1.125 1.125 1.125h12.75c.621 0 1.125-.504 1.125-1.125V11.25a9 9 0 00-9-9z"
/>
</svg>
{m.transcription_mode_label()}
</button>
{/if}
{#if transcribeMode}
<button
onclick={() => (transcribeMode = false)}
aria-label={m.transcription_mode_stop()}
aria-pressed={true}
class="flex items-center gap-1.5 rounded bg-primary px-3 py-1.5 font-sans text-[16px] font-medium text-primary-fg transition focus-visible:ring-2 focus-visible:ring-primary"
>
<svg
class="h-5 w-5 shrink-0"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
stroke-width="1.5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M19.5 14.25v-2.625a3.375 3.375 0 00-3.375-3.375h-1.5A1.125 1.125 0 0113.5 7.125v-1.5a3.375 3.375 0 00-3.375-3.375H8.25m0 12.75h7.5m-7.5 3H12M10.5 2.25H5.625c-.621 0-1.125.504-1.125 1.125v17.25c0 .621.504 1.125 1.125 1.125h12.75c.621 0 1.125-.504 1.125-1.125V11.25a9 9 0 00-9-9z"
/>
</svg>
{m.transcription_mode_stop()}
</button>
{/if}
{#if canWrite && !transcribeMode}
<a
href="/documents/{documentId}/edit"
aria-label={m.btn_edit()}
class="flex items-center gap-1.5 rounded border border-primary bg-transparent px-3 py-1.5 text-[16px] font-medium text-ink transition hover:bg-primary hover:text-primary-fg focus-visible:ring-2 focus-visible:ring-primary"
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/Edit-Content-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5"
/>
<span class="hidden sm:inline">{m.btn_edit()}</span>
</a>
{/if}
{#if filePath && !transcribeMode}
<a
href={fileUrl}
download={originalFilename}
class="hidden rounded border border-transparent bg-muted p-1.5 text-ink transition hover:bg-accent focus-visible:ring-2 focus-visible:ring-primary md:block"
title={m.doc_download_title()}
>
<img
src="/degruyter-icons/Simple/Medium-24px/SVG/Action/Download-MD.svg"
alt=""
aria-hidden="true"
class="h-5 w-5 shrink-0"
/>
</a>
{/if}

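Worth noting in the deleted component above: transcribeMode = $bindable() is what allowed the parent's bind:transcribeMode (visible in the DocumentTopBar hunk earlier) to receive writes made inside this component, e.g. the stop button's transcribeMode = false. A stripped-down sketch of that contract, with purely illustrative names:

<!-- Toggle.svelte — child declares a two-way-bindable prop (illustrative names). -->
<script lang="ts">
	let { active = $bindable(false) }: { active?: boolean } = $props();
</script>

<button onclick={() => (active = !active)}>
	{active ? 'Stop' : 'Start'}
</button>

<!-- Parent.svelte — bind: propagates the child's writes back into parent state. -->
<script lang="ts">
	import Toggle from './Toggle.svelte';
	let active = $state(false);
</script>

<Toggle bind:active />
<p>{active ? 'running' : 'idle'}</p>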

@@ -1,94 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentTopBarActions from './DocumentTopBarActions.svelte';
afterEach(cleanup);
const baseProps = {
documentId: 'd1',
canWrite: false,
isPdf: false,
transcribeMode: false,
filePath: null as string | null,
originalFilename: 'brief.pdf' as string | null,
fileUrl: ''
};
describe('DocumentTopBarActions', () => {
it('renders nothing visible when canWrite is false and no file is present', async () => {
render(DocumentTopBarActions, { props: baseProps });
await expect
.element(page.getByRole('button', { name: /transkribieren/i }))
.not.toBeInTheDocument();
await expect.element(page.getByRole('link', { name: /bearbeiten/i })).not.toBeInTheDocument();
await expect.element(page.getByTitle('Herunterladen')).not.toBeInTheDocument();
});
it('renders the transcribe button when canWrite, isPdf, and not transcribing', async () => {
render(DocumentTopBarActions, {
props: { ...baseProps, canWrite: true, isPdf: true, filePath: 'docs/x.pdf' }
});
await expect.element(page.getByRole('button', { name: /transkribieren/i })).toBeVisible();
});
it('omits the transcribe button when not a PDF', async () => {
render(DocumentTopBarActions, {
props: { ...baseProps, canWrite: true, isPdf: false, filePath: 'docs/x.jpg' }
});
await expect
.element(page.getByRole('button', { name: /transkribieren/i }))
.not.toBeInTheDocument();
});
it('renders the stop-transcribe button when transcribeMode is true', async () => {
render(DocumentTopBarActions, {
props: {
...baseProps,
canWrite: true,
isPdf: true,
transcribeMode: true,
filePath: 'docs/x.pdf'
}
});
await expect.element(page.getByRole('button', { name: /fertig/i })).toBeVisible();
});
it('renders the edit link to the document edit route when canWrite and not transcribing', async () => {
render(DocumentTopBarActions, {
props: { ...baseProps, canWrite: true, documentId: 'doc-42' }
});
await expect
.element(page.getByRole('link', { name: /bearbeiten/i }))
.toHaveAttribute('href', '/documents/doc-42/edit');
});
it('hides the edit link when transcribeMode is true', async () => {
render(DocumentTopBarActions, {
props: { ...baseProps, canWrite: true, transcribeMode: true }
});
await expect.element(page.getByRole('link', { name: /bearbeiten/i })).not.toBeInTheDocument();
});
it('renders the download link when filePath is set and not in transcribe mode', async () => {
render(DocumentTopBarActions, {
props: { ...baseProps, filePath: 'docs/x.pdf', fileUrl: '/api/docs/x' }
});
await expect.element(page.getByTitle('Herunterladen')).toBeVisible();
});
it('hides the download link when transcribeMode is true', async () => {
render(DocumentTopBarActions, {
props: { ...baseProps, filePath: 'docs/x.pdf', fileUrl: '/api/docs/x', transcribeMode: true }
});
await expect.element(page.getByTitle('Herunterladen')).not.toBeInTheDocument();
});
});


@@ -1,30 +0,0 @@
<script lang="ts">
import { formatDate } from '$lib/shared/utils/date';
type Props = {
title?: string | null;
originalFilename?: string | null;
documentDate?: string | null;
};
let { title, originalFilename, documentDate }: Props = $props();
const displayTitle = $derived(title || originalFilename || '');
const shortDate = $derived(documentDate ? formatDate(documentDate, 'short') : null);
const longDate = $derived(documentDate ? formatDate(documentDate, 'long') : null);
</script>
<div class="min-w-0 flex-1 overflow-hidden">
<h1
class="truncate font-serif text-[18px] leading-tight text-ink lg:text-[20px]"
title={displayTitle}
>
{displayTitle}
</h1>
{#if shortDate}
<p class="font-sans text-[16px] text-ink-2">
<span class="lg:hidden">{shortDate}</span>
<span class="hidden lg:inline">{longDate}</span>
</p>
{/if}
</div>


@@ -1,64 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentTopBarTitle from './DocumentTopBarTitle.svelte';
afterEach(cleanup);
const baseProps = {
title: 'Brief an Helene' as string | null,
originalFilename: 'brief.pdf' as string | null,
documentDate: '1923-04-15' as string | null
};
describe('DocumentTopBarTitle', () => {
it('renders the title as a level-1 heading', async () => {
render(DocumentTopBarTitle, { props: baseProps });
await expect
.element(page.getByRole('heading', { level: 1, name: 'Brief an Helene' }))
.toBeVisible();
});
it('falls back to originalFilename when title is null', async () => {
render(DocumentTopBarTitle, { props: { ...baseProps, title: null } });
await expect.element(page.getByRole('heading', { name: 'brief.pdf' })).toBeVisible();
});
it('falls back to originalFilename when title is an empty string', async () => {
render(DocumentTopBarTitle, { props: { ...baseProps, title: '' } });
await expect.element(page.getByRole('heading', { name: 'brief.pdf' })).toBeVisible();
});
it('renders the short date format when a documentDate is supplied', async () => {
render(DocumentTopBarTitle, { props: baseProps });
await expect.element(page.getByText('15.04.1923')).toBeVisible();
});
it('omits the date paragraph entirely when documentDate is null', async () => {
render(DocumentTopBarTitle, { props: { ...baseProps, documentDate: null } });
expect(document.querySelector('p')).toBeNull();
});
it('uses the title (not the originalFilename) for the title attribute when title is set', async () => {
render(DocumentTopBarTitle, { props: baseProps });
const heading = (await page
.getByRole('heading', { name: 'Brief an Helene' })
.element()) as HTMLElement;
expect(heading.getAttribute('title')).toBe('Brief an Helene');
});
it('uses the originalFilename for the title attribute when title is null', async () => {
render(DocumentTopBarTitle, { props: { ...baseProps, title: null } });
const heading = (await page
.getByRole('heading', { name: 'brief.pdf' })
.element()) as HTMLElement;
expect(heading.getAttribute('title')).toBe('brief.pdf');
});
});


@@ -1,75 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import DocumentViewer from './DocumentViewer.svelte';
afterEach(cleanup);
const baseProps = {
doc: { id: 'd1', filePath: null, contentType: null, fileHash: null },
fileUrl: '',
isLoading: false,
error: '',
transcribeMode: false,
blockNumbers: {},
annotationReloadKey: 0,
activeAnnotationId: null,
annotationsDimmed: false,
flashAnnotationId: null,
onAnnotationClick: () => {}
};
describe('DocumentViewer', () => {
it('renders the loading spinner and label when isLoading is true', async () => {
render(DocumentViewer, { props: { ...baseProps, isLoading: true } });
await expect.element(page.getByText('Lade Dokument...')).toBeVisible();
});
it('renders the error message when error is set', async () => {
render(DocumentViewer, { props: { ...baseProps, error: 'Datei nicht verfügbar' } });
await expect.element(page.getByText('Datei nicht verfügbar')).toBeVisible();
});
it('shows the direct-download link in the error state when filePath is present', async () => {
render(DocumentViewer, {
props: {
...baseProps,
doc: { ...baseProps.doc, filePath: 'docs/scan.pdf' },
error: 'Render failed'
}
});
await expect
.element(page.getByRole('link', { name: /direkter download/i }))
.toHaveAttribute('href', '/api/documents/d1/file');
});
it('omits the direct-download link in the error state when filePath is null', async () => {
render(DocumentViewer, { props: { ...baseProps, error: 'Render failed' } });
await expect
.element(page.getByRole('link', { name: /direkter download/i }))
.not.toBeInTheDocument();
});
it('renders the no-scan placeholder when filePath is null and there is no error', async () => {
render(DocumentViewer, { props: baseProps });
await expect.element(page.getByText('Kein Scan vorhanden')).toBeVisible();
});
it('renders an <img> for non-PDF content types when fileUrl is present', async () => {
render(DocumentViewer, {
props: {
...baseProps,
doc: { ...baseProps.doc, filePath: 'docs/x.jpg', contentType: 'image/jpeg' },
fileUrl: '/api/documents/d1/file'
}
});
const img = await page.getByRole('img', { name: /original-scan/i }).element();
expect(img.getAttribute('src')).toBe('/api/documents/d1/file');
});
});


@@ -1,5 +1,5 @@
<script lang="ts">
import { navigating } from '$app/state';
import { navigating } from '$app/stores';
import DashboardNeedsMetadata from './DashboardNeedsMetadata.svelte';
import UploadSuccessBanner from './UploadSuccessBanner.svelte';
@@ -18,7 +18,7 @@ interface Props {
let { topDocs, totalCount, bannerCount, onBannerClose }: Props = $props();
const showSkeleton = $derived(!!navigating.type && topDocs.length === 0);
const showSkeleton = $derived(!!$navigating && topDocs.length === 0);
const showBlock = $derived(topDocs.length > 0 || bannerCount > 0 || showSkeleton);
</script>


@@ -2,23 +2,19 @@ import { describe, it, expect, afterEach, vi } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
// The store must live in a separate module because vi.mock factories are
// hoisted and cannot reference top-level variables defined in this file.
import { navigatingStore } from './__mocks__/navigatingStore';
import EnrichmentBlock from './EnrichmentBlock.svelte';
// Hoist the mutable navigation reference so vi.mock's factory (also hoisted)
// can read it via a getter. Sync factory, no dynamic import: ADR-012 invariant.
const { mockNavigating } = vi.hoisted(() => ({
mockNavigating: { type: null as string | null }
}));
vi.mock('$app/state', () => ({
get navigating() {
return mockNavigating;
}
}));
vi.mock('$app/stores', async () => {
const mod = await import('./__mocks__/navigatingStore');
return { navigating: mod.navigatingStore };
});
afterEach(() => {
cleanup();
mockNavigating.type = null;
navigatingStore.set(null);
});
type Doc = { id: string; title: string; uploadedAt: string };
@@ -69,8 +65,8 @@ describe('EnrichmentBlock', () => {
await expect.element(page.getByTestId('dashboard-needs-metadata')).toBeInTheDocument();
});
it('renders the skeleton when navigation is active and topDocs is empty', async () => {
mockNavigating.type = 'link';
it('renders the skeleton when $navigating is active and topDocs is empty', async () => {
navigatingStore.set({ type: 'link' });
render(EnrichmentBlock, {
topDocs: [],
totalCount: 0,
@@ -80,8 +76,8 @@ describe('EnrichmentBlock', () => {
await expect.element(page.getByTestId('enrichment-block-skeleton')).toBeInTheDocument();
});
it('does not render the skeleton when topDocs is non-empty even during navigation', async () => {
mockNavigating.type = 'link';
it('does not render the skeleton when topDocs is non-empty even during $navigating', async () => {
navigatingStore.set({ type: 'link' });
render(EnrichmentBlock, {
topDocs: [doc('d1')],
totalCount: 1,

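The ./__mocks__/navigatingStore module imported in the hunk above is one of the files this compare does not display. For the assertions shown (navigatingStore.set(null), navigatingStore.set({ type: 'link' }), and re-exporting it as navigating from the $app/stores mock) to work, it would need to export a writable store; the following is a hypothetical reconstruction, not the file's actual contents.

// __mocks__/navigatingStore.ts — hypothetical reconstruction for illustration.
import { writable } from 'svelte/store';

// Shape loosely mirroring SvelteKit's `navigating` store from $app/stores:
// null while idle, an object carrying at least a `type` during navigation.
export const navigatingStore = writable<{ type: string } | null>(null);

Keeping the store in its own module sidesteps the hoisting problem the comment in the spec describes: the async vi.mock factory imports it at mock time instead of closing over a top-level variable that does not exist yet when the factory runs.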

@@ -1,219 +0,0 @@
import { describe, it, expect, vi, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import FileSwitcherStrip from './FileSwitcherStrip.svelte';
afterEach(cleanup);
const makeEntry = (id: string, title: string, overrides: Record<string, unknown> = {}) => ({
id,
title,
status: 'idle' as 'idle' | 'error',
previewUrl: '',
...overrides
});
describe('FileSwitcherStrip', () => {
it('renders the prev and next buttons', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A.pdf')],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
await expect.element(page.getByRole('button', { name: /vorherige datei/i })).toBeVisible();
await expect.element(page.getByRole('button', { name: /nächste datei/i })).toBeVisible();
});
it('renders one chip per file', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A.pdf'), makeEntry('f2', 'B.pdf'), makeEntry('f3', 'C.pdf')],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
const chips = document.querySelectorAll('[data-chip-id]');
expect(chips.length).toBe(3);
});
it('marks the active chip with aria-current=true', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A'), makeEntry('f2', 'B')],
activeId: 'f2',
onSelect: () => {},
onRemove: () => {}
}
});
const f2 = document.querySelector('[data-chip-id="f2"]') as HTMLElement;
const f1 = document.querySelector('[data-chip-id="f1"]') as HTMLElement;
expect(f2.getAttribute('aria-current')).toBe('true');
expect(f1.getAttribute('aria-current')).toBeNull();
});
it('shows the error indicator on chips with status="error"', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A.pdf', { status: 'error' })],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
const chip = document.querySelector('[data-chip-id="f1"]') as HTMLElement;
expect(chip.getAttribute('data-status')).toBe('error');
});
it('calls onSelect with the chip id when clicked', async () => {
const onSelect = vi.fn();
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A'), makeEntry('f2', 'B')],
activeId: 'f1',
onSelect,
onRemove: () => {}
}
});
const f2 = document.querySelector('[data-chip-id="f2"]') as HTMLElement;
f2.click();
expect(onSelect).toHaveBeenCalledWith('f2');
});
it('calls onRemove when the remove button is clicked', async () => {
const onRemove = vi.fn();
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A'), makeEntry('f2', 'B')],
activeId: 'f1',
onSelect: () => {},
onRemove
}
});
const remove = document.querySelector('[data-remove-id="f1"]') as HTMLElement;
remove.click();
expect(onRemove).toHaveBeenCalledWith('f1');
});
it('renders the active title in the sr-only announcer', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'Ein Brief.pdf'), makeEntry('f2', 'B')],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
const announcer = document.querySelector('[aria-live="polite"]');
expect(announcer?.textContent).toContain('Ein Brief.pdf');
});
it('prev button on a single-file strip is a no-op (active chip stays)', async () => {
const onSelect = vi.fn();
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A.pdf')],
activeId: 'f1',
onSelect,
onRemove: () => {}
}
});
await page.getByRole('button', { name: /vorherige datei/i }).click();
// The active chip is still f1 and onSelect was not invoked with a different id.
expect(document.querySelector('[data-chip-id="f1"]')?.getAttribute('aria-current')).toBe(
'true'
);
expect(onSelect).not.toHaveBeenCalled();
});
it('next button on a single-file strip is a no-op (active chip stays)', async () => {
const onSelect = vi.fn();
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A.pdf')],
activeId: 'f1',
onSelect,
onRemove: () => {}
}
});
await page.getByRole('button', { name: /nächste datei/i }).click();
expect(document.querySelector('[data-chip-id="f1"]')?.getAttribute('aria-current')).toBe(
'true'
);
expect(onSelect).not.toHaveBeenCalled();
});
it('navigates with ArrowRight key on focused chip', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A'), makeEntry('f2', 'B'), makeEntry('f3', 'C')],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
const f1 = document.querySelector('[data-chip-id="f1"]') as HTMLElement;
f1.focus();
f1.dispatchEvent(new KeyboardEvent('keydown', { key: 'ArrowRight', bubbles: true }));
await vi.waitFor(() => {
expect(document.activeElement?.getAttribute('data-chip-id')).toBe('f2');
});
});
it('navigates with ArrowLeft key on focused chip (wraps around)', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A'), makeEntry('f2', 'B')],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
const f1 = document.querySelector('[data-chip-id="f1"]') as HTMLElement;
f1.focus();
f1.dispatchEvent(new KeyboardEvent('keydown', { key: 'ArrowLeft', bubbles: true }));
await vi.waitFor(() => {
// ArrowLeft from index 0 wraps to last (f2).
expect(document.activeElement?.getAttribute('data-chip-id')).toBe('f2');
});
});
it('ArrowDown is treated as ArrowRight (vertical key alias)', async () => {
render(FileSwitcherStrip, {
props: {
files: [makeEntry('f1', 'A'), makeEntry('f2', 'B')],
activeId: 'f1',
onSelect: () => {},
onRemove: () => {}
}
});
const f1 = document.querySelector('[data-chip-id="f1"]') as HTMLElement;
f1.focus();
f1.dispatchEvent(new KeyboardEvent('keydown', { key: 'ArrowDown', bubbles: true }));
await vi.waitFor(() => {
expect(document.activeElement?.getAttribute('data-chip-id')).toBe('f2');
});
});
});

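The keyboard tests in the deleted spec above pin down a roving-focus contract: horizontal arrows move focus between chips, ArrowDown aliases ArrowRight, and movement wraps at both ends (ArrowLeft from index 0 lands on the last chip). A minimal sketch of index arithmetic that satisfies those tests — the component's actual handler is not shown in this diff, and the ArrowUp alias is an assumed symmetry:

// Hypothetical helper illustrating the wrap-around focus logic the
// FileSwitcherStrip tests assert; not the component's real code.
function nextChipIndex(current: number, count: number, key: string): number {
	// Vertical keys alias their horizontal counterparts (ArrowUp assumed).
	const forward = key === 'ArrowRight' || key === 'ArrowDown';
	const backward = key === 'ArrowLeft' || key === 'ArrowUp';
	if (!forward && !backward) return current;
	const delta = forward ? 1 : -1;
	// Adding `count` before the modulo keeps the result non-negative,
	// so ArrowLeft from index 0 wraps to the last chip.
	return (current + delta + count) % count;
}

// nextChipIndex(0, 2, 'ArrowLeft')  === 1  (wraps to last)
// nextChipIndex(0, 3, 'ArrowRight') === 1
// nextChipIndex(0, 2, 'ArrowDown')  === 1  (vertical alias)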

@@ -1,43 +0,0 @@
import { describe, it, expect, afterEach } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import ScriptTypeSelect from './ScriptTypeSelect.svelte';
afterEach(cleanup);
describe('ScriptTypeSelect', () => {
it('renders the label and select', async () => {
render(ScriptTypeSelect, { props: { value: '' } });
await expect.element(page.getByLabelText(/schrifttyp/i)).toBeVisible();
});
it('renders all four option values', async () => {
render(ScriptTypeSelect, { props: { value: '' } });
const options = document.querySelectorAll('option');
const values = Array.from(options).map((o) => (o as HTMLOptionElement).value);
expect(values).toEqual(['', 'TYPEWRITER', 'HANDWRITING_LATIN', 'HANDWRITING_KURRENT']);
});
it('marks the placeholder option as disabled', async () => {
render(ScriptTypeSelect, { props: { value: '' } });
const placeholder = document.querySelector('option[value=""]') as HTMLOptionElement;
expect(placeholder.disabled).toBe(true);
});
it('initialises the select with the supplied value', async () => {
render(ScriptTypeSelect, { props: { value: 'TYPEWRITER' } });
const select = (await page.getByRole('combobox').element()) as HTMLSelectElement;
expect(select.value).toBe('TYPEWRITER');
});
it('disables the select when the disabled prop is true', async () => {
render(ScriptTypeSelect, { props: { value: '', disabled: true } });
const select = (await page.getByRole('combobox').element()) as HTMLSelectElement;
expect(select.disabled).toBe(true);
});
});

Some files were not shown because too many files have changed in this diff.