Compare commits

..

1 Commits

Author SHA1 Message Date
Marcel
5ce0856178 fix(ci): add IMPORT_HOST_DIR stub to compose-idempotency env file
Some checks failed
CI / Unit & Component Tests (push) Failing after 2m37s
CI / OCR Service Tests (push) Successful in 18s
CI / Backend Unit Tests (push) Successful in 4m24s
CI / Compose Bucket Idempotency (push) Successful in 56s
CI / Unit & Component Tests (pull_request) Failing after 2m36s
CI / fail2ban Regex (push) Successful in 41s
CI / OCR Service Tests (pull_request) Successful in 17s
CI / Backend Unit Tests (pull_request) Successful in 4m20s
CI / fail2ban Regex (pull_request) Successful in 39s
CI / Compose Bucket Idempotency (pull_request) Successful in 57s
Docker Compose interpolates all variables in the full file even when
only a subset of services is requested. The backend service uses
IMPORT_HOST_DIR with :? (hard-required), causing the idempotency job
to abort before any container starts. A dummy path satisfies the parser;
the backend service is never started in this job so the path need not exist.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-05-14 10:57:30 +02:00
85 changed files with 353 additions and 21046 deletions

View File

@@ -26,46 +26,6 @@ PORT_MAILPIT_SMTP=1025
# Generate with: python3 -c "import secrets; print(secrets.token_hex(32))" # Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
OCR_TRAINING_TOKEN=change-me-in-production OCR_TRAINING_TOKEN=change-me-in-production
# --- Observability ---
# Optional stack — start with: docker compose -f docker-compose.observability.yml up -d
# Requires the main stack to already be running (docker compose up -d creates archiv-net).
# In production the stack is managed from /opt/familienarchiv/ (see docs/DEPLOYMENT.md §4).
# Ports for host access
PORT_GRAFANA=3003
PORT_GLITCHTIP=3002
PORT_PROMETHEUS=9090
# Grafana admin password — change this before exposing Grafana beyond localhost
GRAFANA_ADMIN_PASSWORD=changeme
# GlitchTip domain — production: use https://glitchtip.archiv.raddatz.cloud (must match Caddy vhost)
GLITCHTIP_DOMAIN=http://localhost:3002
# GlitchTip secret key — Django SECRET_KEY equivalent, used to sign sessions and tokens.
# REQUIRED in production — must not be empty or 'changeme'. Fail-closed: GlitchTip will
# refuse to start with an invalid key.
# Generate with: python3 -c "import secrets; print(secrets.token_hex(50))"
GLITCHTIP_SECRET_KEY=changeme-generate-a-real-secret
# PostgreSQL hostname for GlitchTip's db-init job and workers.
# Override when only the staging stack is running (container name differs from archive-db).
# Default (archive-db) is correct for production with the full stack up.
POSTGRES_HOST=archive-db
# $$ escaping note: passwords in /opt/familienarchiv/.env that contain a literal '$' must
# use '$$' so Docker Compose does not expand them as variable references.
# Example: a password 'p@$$word' should be written as 'p@$$$$word' in the .env file.
# Error reporting DSNs — leave empty to disable the SDK (safe default).
# SENTRY_DSN: backend (Spring Boot) — used by the GlitchTip/Sentry Java SDK
SENTRY_DSN=
SENTRY_TRACES_SAMPLE_RATE=
# VITE_SENTRY_DSN: frontend (SvelteKit) — injected at build time via Vite
VITE_SENTRY_DSN=
# Sentry/GlitchTip auth token for source map upload at build time (optional)
SENTRY_AUTH_TOKEN=
# Production SMTP — uncomment and fill in to send real emails instead of catching them # Production SMTP — uncomment and fill in to send real emails instead of catching them
# APP_BASE_URL=https://your-domain.example.com # APP_BASE_URL=https://your-domain.example.com
# MAIL_HOST=smtp.example.com # MAIL_HOST=smtp.example.com

View File

@@ -2,7 +2,6 @@ name: CI
on: on:
push: push:
branches: [main]
pull_request: pull_request:
jobs: jobs:
@@ -33,10 +32,6 @@ jobs:
run: npx @inlang/paraglide-js compile --project ./project.inlang --outdir ./src/lib/paraglide run: npx @inlang/paraglide-js compile --project ./project.inlang --outdir ./src/lib/paraglide
working-directory: frontend working-directory: frontend
- name: Sync SvelteKit
run: npx svelte-kit sync
working-directory: frontend
- name: Lint - name: Lint
run: npm run lint run: npm run lint
working-directory: frontend working-directory: frontend
@@ -61,26 +56,6 @@ jobs:
exit 1 exit 1
fi fi
- name: Assert no (upload|download)-artifact past v3
shell: bash
run: |
# Self-test: verify the regex catches v4+ and does not catch v3.
tmp=$(mktemp)
printf ' uses: actions/upload-artifact@v5\n' > "$tmp"
grep -qP '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' "$tmp" \
|| { echo "FAIL: guard self-test — regex missed upload-artifact@v5"; rm "$tmp"; exit 1; }
printf ' uses: actions/upload-artifact@v3\n' > "$tmp"
grep -qvP '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' "$tmp" \
|| { echo "FAIL: guard self-test — regex incorrectly flagged upload-artifact@v3"; rm "$tmp"; exit 1; }
rm "$tmp"
# Guard: Gitea Actions (act_runner) does not implement the v4 artifact protocol.
# Both upload-artifact and download-artifact share the same incompatibility.
# Pin to @v3. See ADR-014 / #557.
if grep -RPn '^\s+uses:\s+actions/(upload|download)-artifact@v[4-9]' .gitea/workflows/; then
echo "::error::actions/(upload|download)-artifact@v4+ is unsupported on Gitea Actions (act_runner). Pin to @v3. See ADR-014 / #557."
exit 1
fi
- name: Run unit and component tests with coverage - name: Run unit and component tests with coverage
shell: bash shell: bash
run: | run: |
@@ -102,10 +77,9 @@ jobs:
exit 1 exit 1
fi fi
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- name: Upload coverage reports - name: Upload coverage reports
if: always() if: always()
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: coverage-reports name: coverage-reports
path: | path: |
@@ -139,10 +113,9 @@ jobs:
|| { echo "FAIL: /hilfe/transkription.html missing from prerender output"; exit 1; } || { echo "FAIL: /hilfe/transkription.html missing from prerender output"; exit 1; }
echo "PASS: only /hilfe/transkription.html prerendered." echo "PASS: only /hilfe/transkription.html prerendered."
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- name: Upload screenshots - name: Upload screenshots
if: always() if: always()
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: unit-test-screenshots name: unit-test-screenshots
path: frontend/test-results/screenshots/ path: frontend/test-results/screenshots/
@@ -197,14 +170,6 @@ jobs:
./mvnw clean test ./mvnw clean test
working-directory: backend working-directory: backend
- name: Upload surefire reports
if: always()
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
uses: actions/upload-artifact@v3
with:
name: surefire-reports
path: backend/target/surefire-reports/
# ─── fail2ban Regex Regression ──────────────────────────────────────────────── # ─── fail2ban Regex Regression ────────────────────────────────────────────────
# The filter parses Caddy's JSON access log; a Caddy upgrade that reorders # The filter parses Caddy's JSON access log; a Caddy upgrade that reorders
# the JSON keys would silently break it (fail2ban-regex would return # the JSON keys would silently break it (fail2ban-regex would return
@@ -305,7 +270,6 @@ jobs:
MAIL_PORT=1025 MAIL_PORT=1025
APP_MAIL_FROM=noreply@local APP_MAIL_FROM=noreply@local
IMPORT_HOST_DIR=/tmp/dummy-import IMPORT_HOST_DIR=/tmp/dummy-import
COMPOSE_NETWORK_NAME=test-idem-archiv-net
EOF EOF
- name: Bring up minio - name: Bring up minio

View File

@@ -56,10 +56,9 @@ jobs:
exit 1 exit 1
fi fi
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- name: Upload coverage log on failure - name: Upload coverage log on failure
if: failure() if: failure()
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: coverage-log-run-${{ matrix.run }} name: coverage-log-run-${{ matrix.run }}
path: /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log path: /tmp/coverage-test-${{ github.run_id }}-${{ matrix.run }}.log

View File

@@ -30,9 +30,6 @@ name: nightly
# STAGING_OCR_TRAINING_TOKEN # STAGING_OCR_TRAINING_TOKEN
# STAGING_APP_ADMIN_USERNAME # STAGING_APP_ADMIN_USERNAME
# STAGING_APP_ADMIN_PASSWORD # STAGING_APP_ADMIN_PASSWORD
# GRAFANA_ADMIN_PASSWORD
# GLITCHTIP_SECRET_KEY
# SENTRY_DSN (set after GlitchTip first-run; empty = Sentry disabled)
on: on:
schedule: schedule:
@@ -77,8 +74,6 @@ jobs:
MAIL_STARTTLS_ENABLE=false MAIL_STARTTLS_ENABLE=false
APP_MAIL_FROM=noreply@staging.raddatz.cloud APP_MAIL_FROM=noreply@staging.raddatz.cloud
IMPORT_HOST_DIR=/srv/familienarchiv-staging/import IMPORT_HOST_DIR=/srv/familienarchiv-staging/import
POSTGRES_USER=archiv
SENTRY_DSN=${{ secrets.SENTRY_DSN }}
EOF EOF
- name: Verify backend /import:ro mount is wired - name: Verify backend /import:ro mount is wired
@@ -125,77 +120,6 @@ jobs:
--profile staging \ --profile staging \
up -d --wait --remove-orphans up -d --wait --remove-orphans
- name: Deploy observability configs
# Copies the compose file and config tree from the workspace checkout
# into /opt/familienarchiv/ — the permanent location that persists
# between CI runs. Containers started in the next step bind-mount
# from there, so a future workspace wipe cannot corrupt a running
# config file.
#
# obs-secrets.env is written fresh from Gitea secrets on every run so
# Gitea is always the single source of truth for secret rotation.
# Non-secret config lives in infra/observability/obs.env (tracked in git).
run: |
rm -rf /opt/familienarchiv/infra/observability
mkdir -p /opt/familienarchiv/infra/observability
cp -r infra/observability/. /opt/familienarchiv/infra/observability/
cp docker-compose.observability.yml /opt/familienarchiv/
cat > /opt/familienarchiv/obs-secrets.env <<'EOF'
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
POSTGRES_PASSWORD=${{ secrets.STAGING_POSTGRES_PASSWORD }}
POSTGRES_HOST=archiv-staging-db-1
EOF
# Note: POSTGRES_HOST is derived from the Compose project name (archiv-staging)
# and service name (db). A project rename requires updating this value.
chmod 600 /opt/familienarchiv/obs-secrets.env
- name: Validate observability compose config
# Dry-run: resolves all variable substitutions and reports any missing
# required keys before containers start. Catches undefined variables and
# YAML errors in config files updated by the previous step.
# --env-file order: obs.env first (git-tracked defaults), obs-secrets.env
# second (CI-written secrets). Later files win on duplicate keys, so
# obs-secrets.env overrides POSTGRES_HOST set in obs.env.
run: |
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
config --quiet
- name: Start observability stack
# Runs with absolute paths so bind mounts resolve to stable host paths
# that survive workspace wipes between nightly runs (see ADR-016).
# Non-secret config from obs.env (git-tracked); secrets from obs-secrets.env
# (written fresh from Gitea secrets above). --env-file order: obs.env first,
# obs-secrets.env second — later file wins on duplicate keys.
run: |
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
up -d --wait --remove-orphans
- name: Assert observability stack health
# docker compose up --wait covers services WITH healthcheck directives only.
# obs-promtail, obs-cadvisor, obs-node-exporter, and obs-glitchtip-worker have
# no healthcheck — they are considered "started" as soon as the process runs.
# This step explicitly asserts the five healthchecked critical services are
# healthy before the smoke test proceeds.
run: |
set -e
unhealthy=""
for svc in obs-loki obs-prometheus obs-grafana obs-tempo obs-glitchtip; do
status=$(docker inspect "$svc" --format '{{.State.Health.Status}}' 2>/dev/null || echo "missing")
if [ "$status" != "healthy" ]; then
echo "::error::$svc is not healthy (status: $status)"
unhealthy="$unhealthy $svc"
fi
done
[ -z "$unhealthy" ] || exit 1
echo "All critical observability services are healthy"
- name: Reload Caddy - name: Reload Caddy
# Apply any committed Caddyfile changes before smoke-testing the # Apply any committed Caddyfile changes before smoke-testing the
# public surface. Without this step, a Caddyfile edit lands in the # public surface. Without this step, a Caddyfile edit lands in the

View File

@@ -34,9 +34,6 @@ name: release
# MAIL_PORT # MAIL_PORT
# MAIL_USERNAME # MAIL_USERNAME
# MAIL_PASSWORD # MAIL_PASSWORD
# GRAFANA_ADMIN_PASSWORD
# GLITCHTIP_SECRET_KEY
# SENTRY_DSN (set after GlitchTip first-run; empty = Sentry disabled)
on: on:
push: push:
@@ -75,8 +72,6 @@ jobs:
MAIL_STARTTLS_ENABLE=true MAIL_STARTTLS_ENABLE=true
APP_MAIL_FROM=noreply@raddatz.cloud APP_MAIL_FROM=noreply@raddatz.cloud
IMPORT_HOST_DIR=/srv/familienarchiv-production/import IMPORT_HOST_DIR=/srv/familienarchiv-production/import
POSTGRES_USER=archiv
SENTRY_DSN=${{ secrets.SENTRY_DSN }}
EOF EOF
- name: Build images - name: Build images
@@ -98,75 +93,6 @@ jobs:
--env-file .env.production \ --env-file .env.production \
up -d --wait --remove-orphans up -d --wait --remove-orphans
- name: Deploy observability configs
# Mirrors the nightly approach: copies obs compose file and config tree
# to /opt/familienarchiv/ (permanent path, survives workspace wipes — ADR-016),
# then writes obs-secrets.env fresh from Gitea secrets.
# Non-secret config lives in infra/observability/obs.env (tracked in git).
run: |
rm -rf /opt/familienarchiv/infra/observability
mkdir -p /opt/familienarchiv/infra/observability
cp -r infra/observability/. /opt/familienarchiv/infra/observability/
cp docker-compose.observability.yml /opt/familienarchiv/
cat > /opt/familienarchiv/obs-secrets.env <<'EOF'
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
POSTGRES_PASSWORD=${{ secrets.PROD_POSTGRES_PASSWORD }}
POSTGRES_HOST=archiv-production-db-1
EOF
# Note: POSTGRES_HOST is derived from the Compose project name (archiv-production)
# and service name (db). A project rename requires updating this value.
chmod 600 /opt/familienarchiv/obs-secrets.env
- name: Validate observability compose config
# Dry-run: resolves all variable substitutions and reports any missing
# required keys before containers start. Catches undefined variables and
# YAML errors in config files updated by the previous step.
# --env-file order: obs.env first (git-tracked defaults), obs-secrets.env
# second (CI-written secrets). Later files win on duplicate keys, so
# obs-secrets.env overrides POSTGRES_HOST set in obs.env.
# Keep in sync with the equivalent step in nightly.yml (#603).
run: |
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
config --quiet
- name: Start observability stack
# Runs with absolute paths so bind mounts resolve to stable host paths
# that survive workspace wipes between runs (see ADR-016).
# Non-secret config from obs.env (git-tracked); secrets from obs-secrets.env
# (written fresh from Gitea secrets above). --env-file order: obs.env first,
# obs-secrets.env second — later file wins on duplicate keys.
# Keep in sync with the equivalent step in nightly.yml (#603).
run: |
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
up -d --wait --remove-orphans
- name: Assert observability stack health
# docker compose up --wait covers services WITH healthcheck directives only.
# obs-promtail, obs-cadvisor, obs-node-exporter, and obs-glitchtip-worker have
# no healthcheck — they are considered "started" as soon as the process runs.
# This step explicitly asserts the five healthchecked critical services are
# healthy before the smoke test proceeds.
# Keep in sync with the equivalent step in nightly.yml (#603).
run: |
set -e
unhealthy=""
for svc in obs-loki obs-prometheus obs-grafana obs-tempo obs-glitchtip; do
status=$(docker inspect "$svc" --format '{{.State.Health.Status}}' 2>/dev/null || echo "missing")
if [ "$status" != "healthy" ]; then
echo "::error::$svc is not healthy (status: $status)"
unhealthy="$unhealthy $svc"
fi
done
[ -z "$unhealthy" ] || exit 1
echo "All critical observability services are healthy"
- name: Reload Caddy - name: Reload Caddy
# See nightly.yml — same rationale and mechanism: DooD job containers # See nightly.yml — same rationale and mechanism: DooD job containers
# cannot call systemctl directly; nsenter via a privileged sibling # cannot call systemctl directly; nsenter via a privileged sibling

View File

@@ -159,7 +159,7 @@ Input DTOs live flat in the domain package. Response types are the model entitie
→ See [CONTRIBUTING.md §Error handling](./CONTRIBUTING.md#error-handling) → See [CONTRIBUTING.md §Error handling](./CONTRIBUTING.md#error-handling)
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) add to `ErrorCode` type in `frontend/src/lib/shared/errors.ts`, (3) add a `case` in `getErrorMessage()`, (4) add i18n keys in `messages/{de,en,es}.json`. **LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) mirror in `frontend/src/lib/shared/errors.ts`, (3) add i18n keys in `messages/{de,en,es}.json`.
### Security / Permissions ### Security / Permissions
@@ -274,31 +274,6 @@ Back button pattern — use the shared `<BackButton>` component from `$lib/share
→ See [docs/DEPLOYMENT.md](./docs/DEPLOYMENT.md) → See [docs/DEPLOYMENT.md](./docs/DEPLOYMENT.md)
### Observability stack (separate compose file)
Run via `docker-compose.observability.yml` — requires the main stack to be running first. Full setup procedure: [docs/DEPLOYMENT.md §4](./docs/DEPLOYMENT.md#4-logs--observability).
| Service | Container | Default Port | Purpose |
|---------|-----------|-------------|---------|
| Grafana | `obs-grafana` | 3003 | Metrics / logs / traces dashboard |
| Prometheus | `obs-prometheus` | 9090 (dev only — `127.0.0.1` bound) | Metrics store |
| Loki | `obs-loki` | — (internal) | Log store |
| Tempo | `obs-tempo` | — (internal) | Trace store |
| GlitchTip | `obs-glitchtip` | 3002 | Error tracking (Sentry-compatible) |
### Observability env vars
| Variable | Purpose |
|----------|---------|
| `PORT_GRAFANA` | Host port for Grafana UI (default: `3003`) |
| `PORT_GLITCHTIP` | Host port for GlitchTip UI (default: `3002`) |
| `PORT_PROMETHEUS` | Host port for Prometheus UI (default: `9090`) |
| `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` login password — generate with `openssl rand -hex 32` |
| `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` |
| `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (email links, CORS), e.g. `https://glitchtip.example.com` |
| `SENTRY_DSN` | GlitchTip/Sentry DSN for the backend (Spring Boot) — leave empty to disable |
| `VITE_SENTRY_DSN` | GlitchTip/Sentry DSN for the frontend (SvelteKit) — injected at build time via Vite |
## API Testing ## API Testing
HTTP test files are in `backend/api_tests/` for use with the VS Code REST Client extension. HTTP test files are in `backend/api_tests/` for use with the VS Code REST Client extension.

View File

@@ -29,20 +29,6 @@
<properties> <properties>
<java.version>21</java.version> <java.version>21</java.version>
</properties> </properties>
<dependencyManagement>
<dependencies>
<!-- opentelemetry-spring-boot-starter:2.27.0 was built against opentelemetry-api:1.61.0,
but Spring Boot 4.0.0 BOM only manages 1.55.0 (missing GlobalOpenTelemetry.getOrNoop()).
Import the core OTel BOM here to override it before the Spring Boot BOM applies. -->
<dependency>
<groupId>io.opentelemetry</groupId>
<artifactId>opentelemetry-bom</artifactId>
<version>1.61.0</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>
@@ -211,42 +197,6 @@
<artifactId>jsoup</artifactId> <artifactId>jsoup</artifactId>
<version>1.18.1</version> <version>1.18.1</version>
</dependency> </dependency>
<!-- Observability: Prometheus metrics scrape endpoint (version managed by Spring Boot BOM) -->
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus</artifactId>
</dependency>
<!-- Observability: Micrometer → OpenTelemetry tracing bridge (version managed by Spring Boot BOM) -->
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-tracing-bridge-otel</artifactId>
</dependency>
<!-- Observability: OTel Spring Boot auto-instrumentation — NOT in Spring Boot BOM, pinned explicitly -->
<dependency>
<groupId>io.opentelemetry.instrumentation</groupId>
<artifactId>opentelemetry-spring-boot-starter</artifactId>
<version>2.27.0</version>
<exclusions>
<!-- Excludes AzureAppServiceResourceProvider which references ServiceAttributes.SERVICE_INSTANCE_ID
that does not exist in the semconv version pulled by this project. -->
<exclusion>
<groupId>io.opentelemetry.contrib</groupId>
<artifactId>opentelemetry-azure-resources</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Sentry error reporting (GlitchTip-compatible) — sentry-spring-boot-4 is the
Spring Boot 4 / Spring Framework 7 compatible module (replaces the jakarta starter
which crashes with SF7 due to bean-name generation for triply-nested @Import classes) -->
<dependency>
<groupId>io.sentry</groupId>
<artifactId>sentry-spring-boot-4</artifactId>
<version>8.41.0</version>
</dependency>
</dependencies> </dependencies>
@@ -323,16 +273,6 @@
</profiles> </profiles>
</configuration> </configuration>
</plugin> </plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkedProcessTimeoutInSeconds>600</forkedProcessTimeoutInSeconds>
<systemPropertyVariables>
<junit.jupiter.execution.timeout.default>90 s</junit.jupiter.execution.timeout.default>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins> </plugins>
</build> </build>

View File

@@ -30,8 +30,6 @@ public enum ErrorCode {
// --- Users --- // --- Users ---
/** A user with the given ID or username does not exist. 404 */ /** A user with the given ID or username does not exist. 404 */
USER_NOT_FOUND, USER_NOT_FOUND,
/** A group with the given ID does not exist. 404 */
GROUP_NOT_FOUND,
/** The supplied email address is already used by another account. 409 */ /** The supplied email address is already used by another account. 409 */
EMAIL_ALREADY_IN_USE, EMAIL_ALREADY_IN_USE,
/** The supplied current password does not match the stored hash. 400 */ /** The supplied current password does not match the stored hash. 400 */
@@ -54,8 +52,6 @@ public enum ErrorCode {
INVITE_REVOKED, INVITE_REVOKED,
/** The invite has passed its expiry date. 410 */ /** The invite has passed its expiry date. 410 */
INVITE_EXPIRED, INVITE_EXPIRED,
/** A group cannot be deleted because one or more active invites reference it. 409 */
GROUP_HAS_ACTIVE_INVITES,
// --- Auth --- // --- Auth ---
/** The request is not authenticated. 401 */ /** The request is not authenticated. 401 */

View File

@@ -2,7 +2,6 @@ package org.raddatz.familienarchiv.exception;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import io.sentry.Sentry;
import jakarta.validation.ConstraintViolationException; import jakarta.validation.ConstraintViolationException;
import org.raddatz.familienarchiv.exception.DomainException; import org.raddatz.familienarchiv.exception.DomainException;
import org.raddatz.familienarchiv.exception.ErrorCode; import org.raddatz.familienarchiv.exception.ErrorCode;
@@ -64,7 +63,6 @@ public class GlobalExceptionHandler {
@ExceptionHandler(Exception.class) @ExceptionHandler(Exception.class)
public ResponseEntity<ErrorResponse> handleGeneric(Exception ex) { public ResponseEntity<ErrorResponse> handleGeneric(Exception ex) {
Sentry.captureException(ex);
log.error("Unhandled exception", ex); log.error("Unhandled exception", ex);
return ResponseEntity.internalServerError() return ResponseEntity.internalServerError()
.body(new ErrorResponse(ErrorCode.INTERNAL_ERROR, "An unexpected error occurred")); .body(new ErrorResponse(ErrorCode.INTERNAL_ERROR, "An unexpected error occurred"));

View File

@@ -1,6 +1,5 @@
package org.raddatz.familienarchiv.importing; package org.raddatz.familienarchiv.importing;
import com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.RequiredArgsConstructor; import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j; import lombok.extern.slf4j.Slf4j;
import org.apache.poi.ss.usermodel.*; import org.apache.poi.ss.usermodel.*;
@@ -53,9 +52,9 @@ public class MassImportService {
public enum State { IDLE, RUNNING, DONE, FAILED } public enum State { IDLE, RUNNING, DONE, FAILED }
public record ImportStatus(State state, String statusCode, @JsonIgnore String message, int processed, LocalDateTime startedAt) {} public record ImportStatus(State state, String message, int processed, LocalDateTime startedAt) {}
private volatile ImportStatus currentStatus = new ImportStatus(State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null); private volatile ImportStatus currentStatus = new ImportStatus(State.IDLE, "Kein Import gestartet.", 0, null);
public ImportStatus getStatus() { public ImportStatus getStatus() {
return currentStatus; return currentStatus;
@@ -117,29 +116,20 @@ public class MassImportService {
if (currentStatus.state() == State.RUNNING) { if (currentStatus.state() == State.RUNNING) {
throw DomainException.conflict(ErrorCode.IMPORT_ALREADY_RUNNING, "A mass import is already in progress"); throw DomainException.conflict(ErrorCode.IMPORT_ALREADY_RUNNING, "A mass import is already in progress");
} }
currentStatus = new ImportStatus(State.RUNNING, "IMPORT_RUNNING", "Import läuft...", 0, LocalDateTime.now()); currentStatus = new ImportStatus(State.RUNNING, "Import läuft...", 0, LocalDateTime.now());
try { try {
File spreadsheet = findSpreadsheetFile(); File spreadsheet = findSpreadsheetFile();
log.info("Starte Massenimport aus: {}", spreadsheet.getAbsolutePath()); log.info("Starte Massenimport aus: {}", spreadsheet.getAbsolutePath());
int processed = processRows(readSpreadsheet(spreadsheet)); int processed = processRows(readSpreadsheet(spreadsheet));
currentStatus = new ImportStatus(State.DONE, "IMPORT_DONE", currentStatus = new ImportStatus(State.DONE,
"Import abgeschlossen. " + processed + " Dokumente verarbeitet.", "Import abgeschlossen. " + processed + " Dokumente verarbeitet.",
processed, currentStatus.startedAt()); processed, currentStatus.startedAt());
} catch (NoSpreadsheetException e) {
log.error("Massenimport fehlgeschlagen: keine Tabellendatei", e);
currentStatus = new ImportStatus(State.FAILED, "IMPORT_FAILED_NO_SPREADSHEET",
"Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
} catch (Exception e) { } catch (Exception e) {
log.error("Massenimport fehlgeschlagen", e); log.error("Massenimport fehlgeschlagen", e);
currentStatus = new ImportStatus(State.FAILED, "IMPORT_FAILED_INTERNAL", currentStatus = new ImportStatus(State.FAILED, "Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
"Fehler: " + e.getMessage(), 0, currentStatus.startedAt());
} }
} }
private static class NoSpreadsheetException extends RuntimeException {
NoSpreadsheetException(String message) { super(message); }
}
private File findSpreadsheetFile() throws IOException { private File findSpreadsheetFile() throws IOException {
try (Stream<Path> files = Files.list(Paths.get(importDir))) { try (Stream<Path> files = Files.list(Paths.get(importDir))) {
return files return files
@@ -148,7 +138,7 @@ public class MassImportService {
return name.endsWith(".ods") || name.endsWith(".xlsx") || name.endsWith(".xls"); return name.endsWith(".ods") || name.endsWith(".xlsx") || name.endsWith(".xls");
}) })
.findFirst() .findFirst()
.orElseThrow(() -> new NoSpreadsheetException( .orElseThrow(() -> new RuntimeException(
"Keine Tabellendatei (.ods/.xlsx/.xls) in " + importDir + " gefunden!")) "Keine Tabellendatei (.ods/.xlsx/.xls) in " + importDir + " gefunden!"))
.toFile(); .toFile();
} }

View File

@@ -52,11 +52,7 @@ public class InviteService {
public InviteToken createInvite(CreateInviteRequest dto, AppUser creator) { public InviteToken createInvite(CreateInviteRequest dto, AppUser creator) {
Set<UUID> groupIds = new HashSet<>(); Set<UUID> groupIds = new HashSet<>();
if (dto.getGroupIds() != null && !dto.getGroupIds().isEmpty()) { if (dto.getGroupIds() != null && !dto.getGroupIds().isEmpty()) {
Set<UUID> uniqueIds = new HashSet<>(dto.getGroupIds()); List<UserGroup> groups = userService.findGroupsByIds(dto.getGroupIds());
List<UserGroup> groups = userService.findGroupsByIds(new ArrayList<>(uniqueIds));
if (groups.size() != uniqueIds.size()) {
throw DomainException.notFound(ErrorCode.GROUP_NOT_FOUND, "One or more group IDs do not exist");
}
groups.forEach(g -> groupIds.add(g.getId())); groups.forEach(g -> groupIds.add(g.getId()));
} }

View File

@@ -24,7 +24,4 @@ public interface InviteTokenRepository extends JpaRepository<InviteToken, UUID>
@Query("SELECT t FROM InviteToken t ORDER BY t.createdAt DESC") @Query("SELECT t FROM InviteToken t ORDER BY t.createdAt DESC")
List<InviteToken> findAllOrderedByCreatedAt(); List<InviteToken> findAllOrderedByCreatedAt();
@Query("SELECT CASE WHEN COUNT(t) > 0 THEN true ELSE false END FROM InviteToken t JOIN t.groupIds g WHERE g = :groupId AND t.revoked = false AND (t.expiresAt IS NULL OR t.expiresAt > CURRENT_TIMESTAMP) AND (t.maxUses IS NULL OR t.useCount < t.maxUses)")
boolean existsActiveWithGroupId(@Param("groupId") UUID groupId);
} }

View File

@@ -37,9 +37,6 @@ public class UserService {
private final AppUserRepository userRepository; private final AppUserRepository userRepository;
private final UserGroupRepository groupRepository; private final UserGroupRepository groupRepository;
// Injected directly (not via InviteService) to avoid a constructor injection cycle:
// InviteService → UserService → InviteService. Spring Framework 7 forbids such cycles.
private final InviteTokenRepository inviteTokenRepository;
private final PasswordEncoder passwordEncoder; private final PasswordEncoder passwordEncoder;
private final AuditService auditService; private final AuditService auditService;
@@ -291,10 +288,6 @@ public class UserService {
@Transactional @Transactional
public void deleteGroup(UUID id) { public void deleteGroup(UUID id) {
if (inviteTokenRepository.existsActiveWithGroupId(id)) {
throw DomainException.conflict(ErrorCode.GROUP_HAS_ACTIVE_INVITES,
"Cannot delete group " + id + " — referenced by one or more active invites");
}
groupRepository.deleteById(id); groupRepository.deleteById(id);
} }
} }

View File

@@ -45,34 +45,9 @@ server:
forward-headers-strategy: native forward-headers-strategy: native
management: management:
server:
# Management port is separate from the app port so that:
# (a) Caddy never proxies /actuator/* (it only routes :8080 → the app port)
# (b) Prometheus scrapes backend:8081 directly inside archiv-net, not via Caddy
# (c) Spring Security's session-authenticated filter chain on :8080 never sees actuator requests
port: 8081
endpoints:
web:
exposure:
include: health,info,prometheus,metrics
endpoint:
prometheus:
enabled: true
health: health:
mail: mail:
enabled: false enabled: false
tracing:
sampling:
probability: 1.0 # 100% in dev; override via MANAGEMENT_TRACING_SAMPLING_PROBABILITY in prod compose
# OpenTelemetry trace export — failures are non-fatal (app starts cleanly without Tempo running)
# The default http://localhost:4317 ensures CI compatibility when no observability stack is present.
otel:
service:
name: familienarchiv-backend
exporter:
otlp:
endpoint: ${OTEL_EXPORTER_OTLP_ENDPOINT:http://localhost:4317}
springdoc: springdoc:
api-docs: api-docs:
@@ -118,12 +93,3 @@ ocr:
sender-model: sender-model:
activation-threshold: 100 activation-threshold: 100
retrain-delta: 50 retrain-delta: 50
sentry:
dsn: ${SENTRY_DSN:}
environment: ${SPRING_PROFILES_ACTIVE:dev}
traces-sample-rate: ${SENTRY_TRACES_SAMPLE_RATE:1.0}
send-default-pii: false
enable-tracing: true
ignored-exceptions-for-type:
- org.raddatz.familienarchiv.exception.DomainException

View File

@@ -1,3 +0,0 @@
-- The composite PK (invite_token_id, group_id) does not support efficient lookups by group_id alone.
-- Add a dedicated index to support existsActiveWithGroupId queries.
CREATE INDEX idx_itg_group_id ON invite_token_group_ids (group_id);

View File

@@ -1,18 +1,14 @@
package org.raddatz.familienarchiv; package org.raddatz.familienarchiv;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.testcontainers.service.connection.ServiceConnection; import org.springframework.boot.testcontainers.service.connection.ServiceConnection;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Import; import org.springframework.context.annotation.Import;
import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.testcontainers.containers.PostgreSQLContainer; import org.testcontainers.containers.PostgreSQLContainer;
import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Client;
import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE) @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
@ActiveProfiles("test") @ActiveProfiles("test")
@Import(PostgresContainerConfig.class) @Import(PostgresContainerConfig.class)
@@ -21,18 +17,9 @@ class ApplicationContextTest {
@MockitoBean @MockitoBean
S3Client s3Client; S3Client s3Client;
@Autowired
ApplicationContext ctx;
@Test @Test
void contextLoads() { void contextLoads() {
// verifies that the Spring context starts successfully with all beans wired, // verifies that the Spring context starts successfully with all beans wired,
// Flyway migrations applied, and no configuration errors // Flyway migrations applied, and no configuration errors
} }
@Test
void sentry_is_disabled_when_no_dsn_is_configured() {
// application-test.yaml has no sentry.dsn — SDK must stay inactive so tests are clean
assertThat(io.sentry.Sentry.isEnabled()).isFalse();
}
} }

View File

@@ -1,11 +1,11 @@
package org.raddatz.familienarchiv.audit; package org.raddatz.familienarchiv.audit;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.raddatz.familienarchiv.PostgresContainerConfig; import org.raddatz.familienarchiv.PostgresContainerConfig;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Import; import org.springframework.context.annotation.Import;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.transaction.support.TransactionTemplate; import org.springframework.transaction.support.TransactionTemplate;
@@ -18,6 +18,7 @@ import static org.awaitility.Awaitility.await;
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE) @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
@ActiveProfiles("test") @ActiveProfiles("test")
@Import(PostgresContainerConfig.class) @Import(PostgresContainerConfig.class)
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
class AuditServiceIntegrationTest { class AuditServiceIntegrationTest {
@MockitoBean S3Client s3Client; @MockitoBean S3Client s3Client;
@@ -25,11 +26,6 @@ class AuditServiceIntegrationTest {
@Autowired AuditLogRepository auditLogRepository; @Autowired AuditLogRepository auditLogRepository;
@Autowired TransactionTemplate transactionTemplate; @Autowired TransactionTemplate transactionTemplate;
@BeforeEach
void resetAuditLog() {
auditLogRepository.deleteAll();
}
@Test @Test
void logAfterCommit_writes_ANNOTATION_CREATED_row_after_transaction_commits() { void logAfterCommit_writes_ANNOTATION_CREATED_row_after_transaction_commits() {
transactionTemplate.execute(status -> { transactionTemplate.execute(status -> {

View File

@@ -12,9 +12,9 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Import; import org.springframework.context.annotation.Import;
import org.springframework.data.domain.PageRequest; import org.springframework.data.domain.PageRequest;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.transaction.annotation.Transactional;
import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Client;
import java.time.LocalDate; import java.time.LocalDate;
@@ -33,7 +33,7 @@ import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE) @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
@ActiveProfiles("test") @ActiveProfiles("test")
@Import(PostgresContainerConfig.class) @Import(PostgresContainerConfig.class)
@Transactional @DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
class DocumentSearchPagedIntegrationTest { class DocumentSearchPagedIntegrationTest {
private static final int FIXTURE_SIZE = 120; private static final int FIXTURE_SIZE = 120;

View File

@@ -1,33 +0,0 @@
package org.raddatz.familienarchiv.exception;
import io.sentry.Sentry;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.MockedStatic;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.http.ResponseEntity;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mockStatic;
@ExtendWith(MockitoExtension.class)
class GlobalExceptionHandlerTest {
@InjectMocks
private GlobalExceptionHandler handler;
@Test
void handleGeneric_captures_exception_in_sentry_and_returns_500() {
RuntimeException ex = new RuntimeException("unexpected failure");
try (MockedStatic<Sentry> sentryMock = mockStatic(Sentry.class)) {
ResponseEntity<GlobalExceptionHandler.ErrorResponse> response = handler.handleGeneric(ex);
sentryMock.verify(() -> Sentry.captureException(ex));
assertThat(response.getStatusCode().value()).isEqualTo(500);
assertThat(response.getBody()).isNotNull();
assertThat(response.getBody().code()).isEqualTo(ErrorCode.INTERNAL_ERROR);
}
}
}

View File

@@ -19,9 +19,9 @@ import org.springframework.context.annotation.Import;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken; import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.authority.SimpleGrantedAuthority; import org.springframework.security.core.authority.SimpleGrantedAuthority;
import org.springframework.security.core.context.SecurityContextHolder; import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.transaction.annotation.Transactional;
import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Client;
import java.util.List; import java.util.List;
@@ -32,7 +32,7 @@ import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE) @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
@ActiveProfiles("test") @ActiveProfiles("test")
@Import(PostgresContainerConfig.class) @Import(PostgresContainerConfig.class)
@Transactional @DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
class GeschichteServiceIntegrationTest { class GeschichteServiceIntegrationTest {
@MockitoBean @MockitoBean

View File

@@ -20,10 +20,7 @@ import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import java.io.File; import java.io.File;
import java.io.OutputStream;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
import java.time.LocalDate; import java.time.LocalDate;
@@ -73,20 +70,14 @@ class MassImportServiceTest {
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.IDLE); assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.IDLE);
} }
@Test
void getStatus_hasStatusCode_IMPORT_IDLE_byDefault() {
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_IDLE");
}
// ─── runImportAsync ─────────────────────────────────────────────────────── // ─── runImportAsync ───────────────────────────────────────────────────────
@Test @Test
void runImportAsync_setsFailedStatus_whenImportDirectoryDoesNotExist() { void runImportAsync_setsFailedStatus_whenImportDirectoryDoesNotExist() {
// /import directory doesn't exist in test environment → IOException → IMPORT_FAILED_INTERNAL // /import directory doesn't exist in test environment → findSpreadsheetFile throws
service.runImportAsync(); service.runImportAsync();
assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.FAILED); assertThat(service.getStatus().state()).isEqualTo(MassImportService.State.FAILED);
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_FAILED_INTERNAL");
} }
@Test @Test
@@ -102,35 +93,10 @@ class MassImportServiceTest {
assertThat(service.getStatus().message()).contains(tempDir.toString()); assertThat(service.getStatus().message()).contains(tempDir.toString());
} }
@Test
void runImportAsync_setsStatusCode_IMPORT_FAILED_NO_SPREADSHEET_whenDirIsEmpty(@TempDir Path tempDir) {
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
service.runImportAsync();
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_FAILED_NO_SPREADSHEET");
}
@Test
void runImportAsync_setsStatusCode_IMPORT_DONE_whenSpreadsheetHasNoDataRows(@TempDir Path tempDir) throws Exception {
Path xlsx = tempDir.resolve("import.xlsx");
try (XSSFWorkbook wb = new XSSFWorkbook()) {
wb.createSheet("Sheet1");
try (OutputStream out = Files.newOutputStream(xlsx)) {
wb.write(out);
}
}
ReflectionTestUtils.setField(service, "importDir", tempDir.toString());
service.runImportAsync();
assertThat(service.getStatus().statusCode()).isEqualTo("IMPORT_DONE");
}
@Test @Test
void runImportAsync_throwsConflict_whenAlreadyRunning() { void runImportAsync_throwsConflict_whenAlreadyRunning() {
MassImportService.ImportStatus running = new MassImportService.ImportStatus( MassImportService.ImportStatus running = new MassImportService.ImportStatus(
MassImportService.State.RUNNING, "IMPORT_RUNNING", "Running...", 0, LocalDateTime.now()); MassImportService.State.RUNNING, "Running...", 0, LocalDateTime.now());
ReflectionTestUtils.setField(service, "currentStatus", running); ReflectionTestUtils.setField(service, "currentStatus", running);
assertThatThrownBy(() -> service.runImportAsync()) assertThatThrownBy(() -> service.runImportAsync())

View File

@@ -8,9 +8,9 @@ import org.raddatz.familienarchiv.person.PersonRepository;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.annotation.Import; import org.springframework.context.annotation.Import;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.transaction.annotation.Transactional;
import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Client;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
@@ -18,7 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE) @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
@ActiveProfiles("test") @ActiveProfiles("test")
@Import(PostgresContainerConfig.class) @Import(PostgresContainerConfig.class)
@Transactional @DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_EACH_TEST_METHOD)
class PersonServiceIntegrationTest { class PersonServiceIntegrationTest {
@MockitoBean S3Client s3Client; @MockitoBean S3Client s3Client;

View File

@@ -40,47 +40,6 @@ class AdminControllerTest {
@MockitoBean ThumbnailBackfillService thumbnailBackfillService; @MockitoBean ThumbnailBackfillService thumbnailBackfillService;
@MockitoBean CustomUserDetailsService customUserDetailsService; @MockitoBean CustomUserDetailsService customUserDetailsService;
// ─── GET /api/admin/import-status ─────────────────────────────────────────
@Test
@WithMockUser(authorities = "ADMIN")
void importStatus_returns200_withStatusCode_whenAdmin() throws Exception {
MassImportService.ImportStatus status = new MassImportService.ImportStatus(
MassImportService.State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
when(massImportService.getStatus()).thenReturn(status);
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.state").value("IDLE"))
.andExpect(jsonPath("$.statusCode").value("IMPORT_IDLE"))
.andExpect(jsonPath("$.processed").value(0));
}
@Test
@WithMockUser(authorities = "ADMIN")
void importStatus_messageField_notPresentInApiResponse() throws Exception {
MassImportService.ImportStatus status = new MassImportService.ImportStatus(
MassImportService.State.IDLE, "IMPORT_IDLE", "Kein Import gestartet.", 0, null);
when(massImportService.getStatus()).thenReturn(status);
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message").doesNotExist());
}
@Test
void importStatus_returns401_whenUnauthenticated() throws Exception {
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isUnauthorized());
}
@Test
@WithMockUser(authorities = "READ_ALL")
void importStatus_returns403_whenUserLacksAdminPermission() throws Exception {
mockMvc.perform(get("/api/admin/import-status"))
.andExpect(status().isForbidden());
}
@Test @Test
void backfillVersions_returns401_whenUnauthenticated() throws Exception { void backfillVersions_returns401_whenUnauthenticated() throws Exception {
mockMvc.perform(post("/api/admin/backfill-versions")) mockMvc.perform(post("/api/admin/backfill-versions"))

View File

@@ -20,13 +20,10 @@ import org.springframework.security.test.context.support.WithMockUser;
import org.springframework.test.context.bean.override.mockito.MockitoBean; import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.test.web.servlet.MockMvc; import org.springframework.test.web.servlet.MockMvc;
import org.mockito.ArgumentCaptor;
import java.time.LocalDateTime; import java.time.LocalDateTime;
import java.util.List; import java.util.List;
import java.util.UUID; import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.*; import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when; import static org.mockito.Mockito.when;
@@ -150,30 +147,6 @@ class InviteControllerTest {
.andExpect(jsonPath("$.label").value("Für Familie")); .andExpect(jsonPath("$.label").value("Für Familie"));
} }
@Test
@WithMockUser(username = "admin@test.com", authorities = {"ADMIN_USER"})
void createInvite_forwardsGroupIdsToService() throws Exception {
UUID groupId = UUID.randomUUID();
AppUser admin = AppUser.builder().id(UUID.randomUUID()).email("admin@test.com").build();
when(userService.findByEmail("admin@test.com")).thenReturn(admin);
InviteToken savedToken = InviteToken.builder()
.id(UUID.randomUUID()).code("ABCDE12345").useCount(0).build();
when(inviteService.createInvite(any(), eq(admin))).thenReturn(savedToken);
when(inviteService.toListItemDTO(any(), anyString()))
.thenReturn(makeInviteDTO(savedToken.getId(), "ABCDE12345"));
String body = "{\"groupIds\":[\"" + groupId + "\"]}";
mockMvc.perform(post("/api/invites")
.contentType(MediaType.APPLICATION_JSON)
.content(body))
.andExpect(status().isCreated());
ArgumentCaptor<CreateInviteRequest> captor = ArgumentCaptor.forClass(CreateInviteRequest.class);
verify(inviteService).createInvite(captor.capture(), eq(admin));
assertThat(captor.getValue().getGroupIds()).containsExactly(groupId);
}
// ─── DELETE /api/invites/{id} ───────────────────────────────────────────── // ─── DELETE /api/invites/{id} ─────────────────────────────────────────────
@Test @Test

View File

@@ -156,35 +156,6 @@ class InviteServiceTest {
assertThat(result.getGroupIds()).contains(g.getId()); assertThat(result.getGroupIds()).contains(g.getId());
} }
@Test
void createInvite_throwsGroupNotFound_whenSubmittedGroupIdDoesNotExist() {
UUID unknownGroupId = UUID.randomUUID();
when(userService.findGroupsByIds(anyList())).thenReturn(List.of());
CreateInviteRequest req = new CreateInviteRequest();
req.setGroupIds(List.of(unknownGroupId));
assertThatThrownBy(() -> inviteService.createInvite(req, admin))
.isInstanceOf(DomainException.class)
.extracting(e -> ((DomainException) e).getCode())
.isEqualTo(ErrorCode.GROUP_NOT_FOUND);
}
@Test
void createInvite_doesNotThrowGroupNotFound_whenDuplicateGroupIdsSubmitted() {
UUID groupId = UUID.randomUUID();
UserGroup group = UserGroup.builder().id(groupId).name("Familie").build();
when(inviteTokenRepository.findByCode(anyString())).thenReturn(Optional.empty());
when(userService.findGroupsByIds(anyList())).thenReturn(List.of(group));
when(inviteTokenRepository.save(any())).thenAnswer(inv -> inv.getArgument(0));
CreateInviteRequest req = new CreateInviteRequest();
req.setGroupIds(List.of(groupId, groupId)); // same UUID submitted twice
// before deduplication: size(groups)==1 != size(submitted)==2 → false GROUP_NOT_FOUND
assertThatCode(() -> inviteService.createInvite(req, admin)).doesNotThrowAnyException();
}
// ─── redeemInvite ───────────────────────────────────────────────────────── // ─── redeemInvite ─────────────────────────────────────────────────────────
@Test @Test

View File

@@ -1,78 +0,0 @@
package org.raddatz.familienarchiv.user;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.raddatz.familienarchiv.PostgresContainerConfig;
import org.raddatz.familienarchiv.config.FlywayConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.jdbc.test.autoconfigure.AutoConfigureTestDatabase;
import org.springframework.boot.data.jpa.test.autoconfigure.DataJpaTest;
import org.springframework.context.annotation.Import;
import java.time.LocalDateTime;
import java.util.Set;
import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
@DataJpaTest
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
@Import({PostgresContainerConfig.class, FlywayConfig.class})
class InviteTokenRepositoryIntegrationTest {
@Autowired InviteTokenRepository inviteTokenRepository;
@Autowired UserGroupRepository userGroupRepository;
@Autowired AppUserRepository appUserRepository;
private UserGroup group;
private AppUser admin;
@BeforeEach
void setUp() {
inviteTokenRepository.deleteAll();
userGroupRepository.deleteAll();
appUserRepository.deleteAll();
admin = appUserRepository.save(AppUser.builder().email("admin@test.com").password("pw").build());
group = userGroupRepository.save(UserGroup.builder().name("Familie").build());
}
// ─── existsActiveWithGroupId ──────────────────────────────────────────────
@Test
void existsActiveWithGroupId_returnsTrueForActiveInviteLinkedToGroup() {
inviteTokenRepository.save(token(t -> t));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isTrue();
}
@Test
void existsActiveWithGroupId_returnsFalseWhenInviteIsRevoked() {
inviteTokenRepository.save(token(t -> t.revoked(true)));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
}
@Test
void existsActiveWithGroupId_returnsFalseWhenInviteIsExpired() {
inviteTokenRepository.save(token(t -> t.expiresAt(LocalDateTime.now().minusDays(1))));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
}
@Test
void existsActiveWithGroupId_returnsFalseWhenInviteIsExhausted() {
inviteTokenRepository.save(token(t -> t.maxUses(1).useCount(1)));
assertThat(inviteTokenRepository.existsActiveWithGroupId(group.getId())).isFalse();
}
// ─── helpers ─────────────────────────────────────────────────────────────
private InviteToken token(java.util.function.UnaryOperator<InviteToken.InviteTokenBuilder> customizer) {
InviteToken.InviteTokenBuilder builder = InviteToken.builder()
.code(UUID.randomUUID().toString().replace("-", "").substring(0, 10))
.groupIds(new java.util.HashSet<>(Set.of(group.getId())))
.createdBy(admin);
return customizer.apply(builder).build();
}
}

View File

@@ -36,7 +36,6 @@ class UserServiceTest {
@Mock AppUserRepository userRepository; @Mock AppUserRepository userRepository;
@Mock UserGroupRepository groupRepository; @Mock UserGroupRepository groupRepository;
@Mock InviteTokenRepository inviteTokenRepository;
@Mock PasswordEncoder passwordEncoder; @Mock PasswordEncoder passwordEncoder;
@Mock AuditService auditService; @Mock AuditService auditService;
@InjectMocks UserService userService; @InjectMocks UserService userService;
@@ -904,29 +903,6 @@ class UserServiceTest {
assertThat(result.getPermissions()).containsExactlyInAnyOrder("READ_ALL", "WRITE_ALL"); assertThat(result.getPermissions()).containsExactlyInAnyOrder("READ_ALL", "WRITE_ALL");
} }
// ─── deleteGroup ──────────────────────────────────────────────────────────
@Test
void deleteGroup_throwsConflict_whenActiveInviteReferencesGroup() {
UUID groupId = UUID.randomUUID();
when(inviteTokenRepository.existsActiveWithGroupId(groupId)).thenReturn(true);
assertThatThrownBy(() -> userService.deleteGroup(groupId))
.isInstanceOf(DomainException.class)
.extracting(e -> ((DomainException) e).getCode())
.isEqualTo(ErrorCode.GROUP_HAS_ACTIVE_INVITES);
}
@Test
void deleteGroup_deletesGroup_whenNoActiveInviteReferencesGroup() {
UUID groupId = UUID.randomUUID();
when(inviteTokenRepository.existsActiveWithGroupId(groupId)).thenReturn(false);
userService.deleteGroup(groupId);
verify(groupRepository).deleteById(groupId);
}
@Test @Test
void createGroup_withNullPermissions_savesGroupWithEmptyPermissionSet() { void createGroup_withNullPermissions_savesGroupWithEmptyPermissionSet() {
org.raddatz.familienarchiv.user.GroupDTO dto = new org.raddatz.familienarchiv.user.GroupDTO(); org.raddatz.familienarchiv.user.GroupDTO dto = new org.raddatz.familienarchiv.user.GroupDTO();

View File

@@ -13,18 +13,3 @@ spring:
password: test password: test
mail: mail:
host: localhost host: localhost
# Disable OTel SDK entirely in tests — prevents auto-configuration from loading resource providers
# (e.g. AzureAppServiceResourceProvider) that fail against the semconv version used here.
otel:
sdk:
disabled: true
# Disable trace export in tests — prevents OTLP connection attempts when no Tempo is running.
# Sampling probability 0.0 means no spans are created, so no export is attempted.
management:
server:
port: 0 # random port per context — prevents TIME_WAIT conflicts when @DirtiesContext restarts the context
tracing:
sampling:
probability: 0.0

View File

@@ -1,2 +0,0 @@
logging.level.root=WARN
logging.level.org.raddatz=INFO

View File

@@ -1,266 +0,0 @@
# Observability stack — Grafana LGTM + GlitchTip
#
# Requires the main stack to be running first:
# docker compose up -d # creates archiv-net
# docker compose -f docker-compose.observability.yml up -d
#
# To validate without starting:
# docker compose -f docker-compose.observability.yml config
services:
# --- Metrics: Prometheus ---
prometheus:
image: prom/prometheus:v3.4.0
container_name: obs-prometheus
restart: unless-stopped
volumes:
- ./infra/observability/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=30d'
- '--web.enable-lifecycle'
ports:
- "127.0.0.1:${PORT_PROMETHEUS:-9090}:9090"
healthcheck:
test: ["CMD", "wget", "-qO-", "http://localhost:9090/-/healthy"]
interval: 30s
timeout: 5s
retries: 3
networks:
- archiv-net
- obs-net
node-exporter:
image: prom/node-exporter:v1.9.0
container_name: obs-node-exporter
restart: unless-stopped
# pid: host — required for process-level CPU/memory metrics; cgroup isolation applies
pid: host
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.sysfs=/host/sys'
# $$ is YAML Compose escaping for a literal $ in the regex alternation
- '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
expose:
- "9100"
networks:
- obs-net
cadvisor:
image: gcr.io/cadvisor/cadvisor:v0.52.1
container_name: obs-cadvisor
restart: unless-stopped
# privileged: true — required for cgroup and namespace metrics, see cAdvisor docs.
# Accepted risk: cAdvisor is pinned, on Renovate, and not exposed outside obs-net.
privileged: true
volumes:
- /:/rootfs:ro
# /var/run/docker.sock mounted read-only — sufficient for container metadata discovery
- /var/run/docker.sock:/var/run/docker.sock:ro
- /sys:/sys:ro
- /var/lib/docker:/var/lib/docker:ro
expose:
- "8080"
networks:
- obs-net
# --- Logs: Loki + Promtail ---
loki:
image: grafana/loki:3.4.2
container_name: obs-loki
restart: unless-stopped
volumes:
- ./infra/observability/loki/loki-config.yml:/etc/loki/loki-config.yml:ro
- loki_data:/loki
command: -config.file=/etc/loki/loki-config.yml
expose:
- "3100"
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:3100/ready | grep -q ready || exit 1"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
networks:
- obs-net
promtail:
image: grafana/promtail:3.4.2
container_name: obs-promtail
restart: unless-stopped
volumes:
- ./infra/observability/promtail/promtail-config.yml:/etc/promtail/promtail-config.yml:ro
- /var/lib/docker/containers:/var/lib/docker/containers:ro
# :ro restricts file-system access but NOT Docker API permissions — a compromised Promtail has full daemon access. Accepted risk on single-operator self-hosted archive.
- /var/run/docker.sock:/var/run/docker.sock:ro
- promtail_positions:/tmp # persists positions.yaml across restarts — avoids duplicate log ingestion
command: -config.file=/etc/promtail/promtail-config.yml
networks:
- archiv-net # label discovery from application containers via Docker socket
- obs-net # log shipping to Loki
depends_on:
loki:
condition: service_healthy
# --- Traces: Tempo ---
tempo:
image: grafana/tempo:2.7.2
container_name: obs-tempo
restart: unless-stopped
volumes:
- ./infra/observability/tempo/tempo.yml:/etc/tempo.yml:ro
- tempo_data:/var/tempo
command: -config.file=/etc/tempo.yml
expose:
- "3200" # Grafana queries Tempo on this port (obs-net only)
- "4317" # OTLP gRPC — backend sends traces here (archiv-net)
- "4318" # OTLP HTTP — alternative transport (archiv-net)
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:3200/ready | grep -q ready || exit 1"]
interval: 10s
timeout: 5s
retries: 5
start_period: 15s
networks:
- archiv-net # backend (archive-backend) reaches tempo:4317 over this network
- obs-net # Grafana reaches tempo:3200 over this network
# --- Dashboards: Grafana ---
obs-grafana:
image: grafana/grafana-oss:11.6.1
container_name: obs-grafana
restart: unless-stopped
ports:
- "127.0.0.1:${PORT_GRAFANA:-3003}:3000"
environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-changeme}
GF_USERS_ALLOW_SIGN_UP: "false"
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL:-http://localhost:3003}
volumes:
- grafana_data:/var/lib/grafana
- ./infra/observability/grafana/provisioning:/etc/grafana/provisioning:ro
healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:3000/api/health | grep -q ok || exit 1"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
prometheus:
condition: service_healthy
loki:
condition: service_healthy
tempo:
condition: service_healthy
networks:
- obs-net
# --- Error Tracking: GlitchTip ---
obs-redis:
image: redis:7-alpine
container_name: obs-redis
restart: unless-stopped
volumes:
- glitchtip_data:/data
expose:
- "6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- obs-net
obs-glitchtip:
image: glitchtip/glitchtip:6.1.6
container_name: obs-glitchtip
restart: unless-stopped
depends_on:
obs-redis:
condition: service_healthy
obs-glitchtip-db-init:
condition: service_completed_successfully
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST:-archive-db}:5432/glitchtip
REDIS_URL: redis://obs-redis:6379/0
SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
GLITCHTIP_DOMAIN: ${GLITCHTIP_DOMAIN:-http://localhost:3002}
DEFAULT_FROM_EMAIL: ${APP_MAIL_FROM:-noreply@familienarchiv.local}
EMAIL_URL: smtp://mailpit:1025
GLITCHTIP_MAX_EVENT_LIFE_DAYS: 90
ports:
- "127.0.0.1:${PORT_GLITCHTIP:-3002}:8000"
healthcheck:
test: ["CMD", "bash", "-c", "echo > /dev/tcp/localhost/8000"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
networks:
- archiv-net
- obs-net
obs-glitchtip-worker:
image: glitchtip/glitchtip:6.1.6
container_name: obs-glitchtip-worker
restart: unless-stopped
command: ./bin/run-celery-with-beat.sh
depends_on:
obs-redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST:-archive-db}:5432/glitchtip
REDIS_URL: redis://obs-redis:6379/0
SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
networks:
- archiv-net
- obs-net
obs-glitchtip-db-init:
image: postgres:16-alpine
container_name: obs-glitchtip-db-init
restart: "no"
environment:
PGPASSWORD: ${POSTGRES_PASSWORD}
command: >
sh -c "psql -h ${POSTGRES_HOST:-archive-db} -U ${POSTGRES_USER} -tc
\"SELECT 1 FROM pg_database WHERE datname = 'glitchtip'\" |
grep -q 1 ||
psql -h ${POSTGRES_HOST:-archive-db} -U ${POSTGRES_USER} -c \"CREATE DATABASE glitchtip;\""
networks:
- archiv-net
networks:
# Shared network created by the main docker-compose.yml.
# The observability stack joins as a peer so Prometheus can scrape
# archive-backend by container name. The observability stack must NOT
# attempt to create this network — it will fail with a clear error if
# the main stack is not running yet.
archiv-net:
external: true
# Internal network for observability-service-to-service traffic
# (e.g. Grafana → Prometheus, Grafana → Loki, Grafana → Tempo).
obs-net:
driver: bridge
volumes:
prometheus_data:
loki_data:
promtail_positions:
tempo_data:
grafana_data:
glitchtip_data:

View File

@@ -39,7 +39,6 @@
networks: networks:
archiv-net: archiv-net:
driver: bridge driver: bridge
name: ${COMPOSE_NETWORK_NAME:-archiv-net}
volumes: volumes:
postgres-data: postgres-data:
@@ -213,11 +212,10 @@ services:
APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud} APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud}
SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true} SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true}
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true} SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true}
OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4317
networks: networks:
- archiv-net - archiv-net
healthcheck: healthcheck:
test: ["CMD-SHELL", "wget -qO- http://localhost:8081/actuator/health | grep -q UP || exit 1"] test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
interval: 15s interval: 15s
timeout: 5s timeout: 5s
retries: 10 retries: 10

View File

@@ -147,20 +147,8 @@ services:
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-false} SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-false}
APP_OCR_BASE_URL: http://ocr-service:8000 APP_OCR_BASE_URL: http://ocr-service:8000
APP_OCR_TRAINING_TOKEN: "${OCR_TRAINING_TOKEN:-}" APP_OCR_TRAINING_TOKEN: "${OCR_TRAINING_TOKEN:-}"
SENTRY_DSN: ${SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${SENTRY_TRACES_SAMPLE_RATE:-1.0}
# Observability: send traces to Tempo inside archiv-net (OTLP gRPC port 4317)
# Tempo is defined in docker-compose.observability.yml (future issue).
# OTLP failures are non-fatal — backend starts cleanly without the observability stack.
OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4317
# 10% sampling in this compose (dev + staging) — override locally to 1.0 if needed
MANAGEMENT_TRACING_SAMPLING_PROBABILITY: "0.1"
ports: ports:
- "${PORT_BACKEND}:8080" - "${PORT_BACKEND}:8080"
# Management port — Prometheus scrapes /actuator/prometheus from inside archiv-net.
# Not exposed to the host; Docker service-name DNS (backend:8081) is sufficient.
expose:
- "8081"
networks: networks:
- archiv-net - archiv-net
healthcheck: healthcheck:

View File

@@ -63,7 +63,7 @@ Members of the cross-cutting layer have no entity of their own, no user-facing C
| `audit` | Append-only event store (`audit_log`) for all domain mutations. Feeds the activity feed and Family Pulse dashboard. | Consumed by 5+ domains; no user-facing CRUD of its own | | `audit` | Append-only event store (`audit_log`) for all domain mutations. Feeds the activity feed and Family Pulse dashboard. | Consumed by 5+ domains; no user-facing CRUD of its own |
| `config` | Infrastructure bean definitions: `MinioConfig`, `AsyncConfig`, `WebConfig` | Framework infra; no business logic | | `config` | Infrastructure bean definitions: `MinioConfig`, `AsyncConfig`, `WebConfig` | Framework infra; no business logic |
| `dashboard` | Stats aggregation for the admin dashboard and Family Pulse widget | Aggregates from 3+ domains; no owned entities | | `dashboard` | Stats aggregation for the admin dashboard and Family Pulse widget | Aggregates from 3+ domains; no owned entities |
| `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service. Adding a new `ErrorCode` requires matching updates in `frontend/src/lib/shared/errors.ts` and all three `messages/*.json` locale files. | | `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service |
| `filestorage` | `FileService` — MinIO/S3 upload, download, presigned-URL generation | Generic service; consumed by `document` and `ocr` | | `filestorage` | `FileService` — MinIO/S3 upload, download, presigned-URL generation | Generic service; consumed by `document` and `ocr` |
| `importing` | `MassImportService` — async ODS/Excel batch import | Orchestrates across `person`, `tag`, `document` | | `importing` | `MassImportService` — async ODS/Excel batch import | Orchestrates across `person`, `tag`, `document` |
| `security` | `SecurityConfig`, `Permission` enum, `@RequirePermission` annotation, `PermissionAspect` (AOP) | Framework infra; enforced globally across all controllers | | `security` | `SecurityConfig`, `Permission` enum, `@RequirePermission` annotation, `PermissionAspect` (AOP) | Framework infra; enforced globally across all controllers |

View File

@@ -43,7 +43,6 @@ graph TD
- SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not. - SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not.
- The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy. - The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy.
- Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001). - Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001).
- An optional observability stack (Prometheus, Node Exporter, cAdvisor, Loki, Tempo, Grafana, GlitchTip) runs as a separate compose file. Configuration lives under `infra/observability/`. In production and CI, the stack is managed from `/opt/familienarchiv/` (CI copies it there on every nightly run) so bind mounts survive workspace wipes — see §4 for the ops procedure.
### OCR memory requirements ### OCR memory requirements
@@ -107,9 +106,6 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| `MAIL_SMTP_AUTH` | SMTP auth enabled | `false` (dev) | YES (prod) | — | | `MAIL_SMTP_AUTH` | SMTP auth enabled | `false` (dev) | YES (prod) | — |
| `MAIL_STARTTLS_ENABLE` | STARTTLS enabled | `false` (dev) | YES (prod) | — | | `MAIL_STARTTLS_ENABLE` | STARTTLS enabled | `false` (dev) | YES (prod) | — |
| `SPRING_PROFILES_ACTIVE` | Spring profile | `dev,e2e` (compose) | YES | — | | `SPRING_PROFILES_ACTIVE` | Spring profile | `dev,e2e` (compose) | YES | — |
| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP gRPC endpoint for distributed traces (Tempo). Set to `http://tempo:4317` via compose. | `http://localhost:4317` | — | — |
| `MANAGEMENT_TRACING_SAMPLING_PROBABILITY` | Micrometer tracing sample rate; overridden to `0.0` in test profile. | `0.1` (compose) / `1.0` (dev) | — | — |
| `SENTRY_DSN` | GlitchTip / Sentry DSN for backend error reporting. Leave empty to disable the SDK. Set after GlitchTip first-run (§4). | — | — | YES |
### PostgreSQL container ### PostgreSQL container
@@ -138,19 +134,6 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| `BLLA_MODEL_PATH` | Kraken baseline layout analysis model path | `/app/models/blla.mlmodel` | — | — | | `BLLA_MODEL_PATH` | Kraken baseline layout analysis model path | `/app/models/blla.mlmodel` | — | — |
| `OCR_MEM_LIMIT` | Container memory cap for ocr-service in `docker-compose.prod.yml`. Set to `6g` on CX32 hosts; leave unset on CX42+ to use the 12g default | `12g` (prod compose default) | — | — | | `OCR_MEM_LIMIT` | Container memory cap for ocr-service in `docker-compose.prod.yml`. Set to `6g` on CX32 hosts; leave unset on CX42+ to use the 12g default | `12g` (prod compose default) | — | — |
### Observability stack (`docker-compose.observability.yml`)
| Variable | Purpose | Default | Required? | Sensitive? |
|---|---|---|---|---|
| `PORT_PROMETHEUS` | Host port for the Prometheus UI (bound to `127.0.0.1` only) | `9090` | — | — |
| `PORT_GRAFANA` | Host port for the Grafana UI (bound to `127.0.0.1` only) | `3003` | — | — |
| `POSTGRES_HOST` | PostgreSQL hostname for GlitchTip's db-init job and workers. Override when only the staging stack is running and `archive-db` is not resolvable by that name. | `archive-db` | — | — |
| `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` user password | `changeme` | YES (prod) | YES |
| `PORT_GLITCHTIP` | Host port for the GlitchTip UI (bound to `127.0.0.1` only) | `3002` | — | — |
| `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (used in email links and CORS) | `http://localhost:3002` | YES (prod) | — |
| `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` | — | YES | YES |
| `VITE_SENTRY_DSN` | GlitchTip/Sentry DSN for the frontend (SvelteKit) — injected at build time via Vite. Leave empty to disable. Set after GlitchTip first-run (§4). | — | — | YES |
--- ---
## 3. Bootstrap from scratch ## 3. Bootstrap from scratch
@@ -196,29 +179,6 @@ curl -fsSL https://tailscale.com/install.sh | sh && tailscale up
# files to disk during execution (cleaned up unconditionally on completion). # files to disk during execution (cleaned up unconditionally on completion).
# A multi-tenant runner would need to switch to stdin-piped env files. # A multi-tenant runner would need to switch to stdin-piped env files.
# (See https://docs.gitea.com/usage/actions/quickstart for the register step.) # (See https://docs.gitea.com/usage/actions/quickstart for the register step.)
# Runner workspace directory — required for DooD bind-mount resolution (ADR-015).
# act_runner stores job workspaces here so that docker compose bind mounts resolve
# to real host paths. The path must be identical on the host and inside job containers.
mkdir -p /srv/gitea-workspace
# Observability config permanent directory — the nightly CI job copies
# docker-compose.observability.yml and infra/observability/ here on every run.
# The obs stack is always started from this path, not from the workspace.
# See ADR-016 for why this directory is used instead of a server-pull approach.
mkdir -p /opt/familienarchiv/infra
# Both paths must also appear in the runner service volumes in ~/docker/gitea/compose.yaml:
# volumes:
# - /srv/gitea-workspace:/srv/gitea-workspace
# /opt/familienarchiv does NOT need to be in the runner container's volumes — job
# containers are spawned by the host daemon directly (DooD), so the host path is
# accessible to them as long as runner-config.yaml lists it in valid_volumes + options.
# See runner-config.yaml (workdir_parent + valid_volumes + options) and ADR-015/016.
# ⚠ IMPORTANT: after any change to runner-config.yaml (valid_volumes, options, workdir_parent),
# restart the Gitea Act runner for the new config to take effect:
# docker restart gitea-runner
# Until restarted, job containers are spawned with the old config and any new bind mounts
# (e.g. /opt/familienarchiv) will not be available inside job steps.
``` ```
### 3.2 DNS records ### 3.2 DNS records
@@ -249,10 +209,6 @@ git.raddatz.cloud A <server IP>
| `MAIL_PORT` | release.yml | typically `587` | | `MAIL_PORT` | release.yml | typically `587` |
| `MAIL_USERNAME` | release.yml | SMTP user | | `MAIL_USERNAME` | release.yml | SMTP user |
| `MAIL_PASSWORD` | release.yml | SMTP password | | `MAIL_PASSWORD` | release.yml | SMTP password |
| `GRAFANA_ADMIN_PASSWORD` | both | Grafana `admin` login — generate a strong password |
| `GLITCHTIP_SECRET_KEY` | both | Django secret key — `openssl rand -hex 32` |
| `SENTRY_DSN` | both | GlitchTip project DSN — set after first-run (§4); leave empty to keep Sentry disabled |
| `VITE_SENTRY_DSN` | both | GlitchTip frontend project DSN — set after first-run (§4); leave empty to keep Sentry disabled |
### 3.4 First deploy ### 3.4 First deploy
@@ -300,156 +256,9 @@ docker compose logs --tail=200 <service>
- **Spring Actuator health**: `http://localhost:8080/actuator/health` (internal only in prod — port 8081 for Prometheus scraping) - **Spring Actuator health**: `http://localhost:8080/actuator/health` (internal only in prod — port 8081 for Prometheus scraping)
- **Prometheus scraping**: management port 8081, path `/actuator/prometheus`. Internal only; Caddy blocks `/actuator/*` externally. - **Prometheus scraping**: management port 8081, path `/actuator/prometheus`. Internal only; Caddy blocks `/actuator/*` externally.
### Observability stack ### Future observability
An observability stack is available via `docker-compose.observability.yml`. Configuration lives under `infra/observability/`. Phase 7 of the Production v1 milestone adds Prometheus + Loki + Grafana. No monitoring infrastructure is in place yet.
#### Dev — start from the workspace
```bash
docker compose up -d # creates archiv-net
docker compose -f docker-compose.observability.yml up -d
```
#### Why the obs stack is managed differently from the main app stack
The main app stack (`docker-compose.prod.yml`) has no config-file bind mounts — its containers read config from env vars and image defaults. The workspace is wiped after each CI run but that does not affect running containers, because they hold no references to workspace paths.
The obs stack is different: `prometheus.yml`, `tempo.yml`, Loki config, Grafana provisioning files, and Promtail config are all bind-mounted from the host filesystem into their containers. If those source paths disappear (workspace wipe), the containers can restart fine until a `docker compose up` is run again — at that point Docker tries to re-resolve the bind-mount source and fails because the workspace path no longer exists.
The fix is to keep the obs compose file and config tree at a **permanent path** that CI copies to on every run but which survives between runs: `/opt/familienarchiv/` (see ADR-016).
#### Production — managed from `/opt/familienarchiv/`
Every CI run (nightly + release) copies `docker-compose.observability.yml` and `infra/observability/` to `/opt/familienarchiv/` before starting the stack. Bind mounts then resolve to `/opt/familienarchiv/infra/observability/…` — a stable path that outlasts any workspace wipe.
**Environment variables** follow the same two-source model as the main stack:
| Source | What it contains | Managed by |
|---|---|---|
| `infra/observability/obs.env` | All non-secret config (ports, URLs, hostnames) | Git — reviewed in PRs |
| `/opt/familienarchiv/obs-secrets.env` | Passwords and secret keys only | CI — written fresh from Gitea secrets on every deploy |
Both files are passed explicitly via `--env-file` to the compose command, so there is no implicit auto-read `.env` and no operator-managed file to keep in sync.
**Non-secret config** (`infra/observability/obs.env`):
| Key | Value | Notes |
|---|---|---|
| `PORT_GRAFANA` | `3003` | Avoids collision with staging frontend on port 3001 |
| `PORT_GLITCHTIP` | `3002` | |
| `PORT_PROMETHEUS` | `9090` | |
| `GF_SERVER_ROOT_URL` | `https://grafana.archiv.raddatz.cloud` | Required for alert email links and OAuth redirects |
| `GLITCHTIP_DOMAIN` | `https://glitchtip.archiv.raddatz.cloud` | Must match the Caddy vhost |
| `POSTGRES_HOST` | `archive-db` | Override if only the staging stack is running |
**Secret keys** (set in Gitea secrets, injected by CI into `obs-secrets.env`):
| Gitea secret | Notes |
|---|---|
| `GRAFANA_ADMIN_PASSWORD` | Strong unique password; shared by nightly and release |
| `GLITCHTIP_SECRET_KEY` | `openssl rand -hex 32`; shared by nightly and release |
| `STAGING_POSTGRES_PASSWORD` / `PROD_POSTGRES_PASSWORD` | Must match the running PostgreSQL container |
To start or restart the obs stack manually on the server (after CI has run at least once):
```bash
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
up -d --wait --remove-orphans
```
> **Note (manual ops only):** CI clears the destination with `rm -rf` before copying, so deleted files are removed automatically on the next run. If you copy manually with `cp -r` without first removing the directory, stale files from deleted configs will persist until cleaned up:
> ```bash
> rm /opt/familienarchiv/infra/observability/<path-to-removed-file>
> ```
Current services:
| Service | Image | Purpose |
|---|---|---|
| `obs-prometheus` | `prom/prometheus:v3.4.0` | Scrapes metrics from backend management port 8081 (`/actuator/prometheus`), node-exporter, and cAdvisor |
| `obs-node-exporter` | `prom/node-exporter:v1.9.0` | Host-level CPU / memory / disk / network metrics |
| `obs-cadvisor` | `gcr.io/cadvisor/cadvisor:v0.52.1` | Per-container resource metrics |
| `obs-loki` | `grafana/loki:3.4.2` | Log aggregation — receives log streams from Promtail. Port 3100 is `expose`-only (not host-bound). |
| `obs-promtail` | `grafana/promtail:3.4.2` | Log shipping agent — reads all Docker container logs via the Docker socket and forwards them to Loki with `container_name`, `compose_service`, and `compose_project` labels |
| `obs-tempo` | `grafana/tempo:2.7.2` | Distributed trace storage — OTLP gRPC receiver on port 4317, OTLP HTTP on port 4318 (both `archiv-net`-internal). Grafana queries traces on port 3200 (`obs-net`-internal). All ports are `expose`-only (not host-bound). |
| `obs-grafana` | `grafana/grafana-oss:11.6.1` | Unified observability UI — metrics dashboards, log exploration, trace viewer. Bound to `127.0.0.1:${PORT_GRAFANA:-3003}` on the host. |
| `obs-glitchtip` | `glitchtip/glitchtip:6.1.6` | Sentry-compatible error tracker. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces. Bound to `127.0.0.1:${PORT_GLITCHTIP:-3002}`. |
| `obs-glitchtip-worker` | `glitchtip/glitchtip:6.1.6` | Celery + beat worker — processes async GlitchTip tasks (event ingestion, notifications, cleanup). |
| `obs-redis` | `redis:7-alpine` | Celery task broker for GlitchTip. Internal to `obs-net`; no host port exposed. |
| `obs-glitchtip-db-init` | `postgres:16-alpine` | One-shot init container. Creates the `glitchtip` database on the existing `archive-db` PostgreSQL instance if it does not already exist. Runs at stack startup; exits cleanly once done. |
#### Grafana
| Item | Value |
|---|---|
| URL | `http://localhost:3003` (or `http://localhost:$PORT_GRAFANA`) |
| Username | `admin` |
| Password | `$GRAFANA_ADMIN_PASSWORD` (default: `changeme` — **change before exposing to a network**) |
Datasources are auto-provisioned on first start (Prometheus, Loki, Tempo — no manual setup required). Three dashboards are pre-loaded:
| Dashboard | Grafana ID | Purpose |
|---|---|---|
| Node Exporter Full | 1860 | Host CPU, memory, disk, network |
| Spring Boot Observability | 17175 | JVM metrics, HTTP latency, error rate |
| Loki Logs | 13639 | Log exploration and filtering |
Tempo traces are accessible via Grafana Explore → Tempo datasource, and linked from Loki logs via the `traceId` derived field.
**Loki quick checks** (after ~60 s, run from inside the `obs-loki` container):
```bash
# Loki health
docker exec obs-loki wget -qO- http://localhost:3100/ready
# List labels
docker exec obs-loki wget -qO- 'http://localhost:3100/loki/api/v1/labels'
# Query logs by service (stable across dev and prod environments)
docker exec obs-loki wget -qO- \
'http://localhost:3100/loki/api/v1/query_range?query=%7Bcompose_service%3D%22backend%22%7D&limit=5'
```
**Prefer `compose_service` over `container_name` in LogQL queries** — `container_name` differs between dev (`archive-backend`) and prod (`archiv-production-backend-1`), while `compose_service` is stable (`backend`, `db`, `minio`, etc.).
Prometheus port `9090` and Grafana port `3003` (default; configurable via `PORT_GRAFANA`) are bound to `127.0.0.1` on the host. No other observability ports are host-bound.
#### GlitchTip
| Item | Value |
|---|---|
| URL | `http://localhost:3002` (or `http://localhost:$PORT_GLITCHTIP`) |
**Required env vars** — set in `.env` before first start:
```bash
GLITCHTIP_SECRET_KEY=$(python3 -c "import secrets; print(secrets.token_hex(32))")
GLITCHTIP_DOMAIN=http://localhost:3002 # change to your public URL in prod
PORT_GLITCHTIP=3002 # optional, defaults to 3002
```
**Database:** GlitchTip shares the existing `archive-db` PostgreSQL instance. The `obs-glitchtip-db-init` one-shot container creates a dedicated `glitchtip` database on first stack start — no manual step required.
**First-run steps** (one-time, after `docker compose -f docker-compose.observability.yml up -d`):
```bash
# 1. Create the Django superuser (interactive)
docker exec -it obs-glitchtip ./manage.py createsuperuser
# 2. Open the GlitchTip UI and log in
open http://localhost:3002
# 3. Create an organisation (e.g. "Familienarchiv")
# 4. Create two projects:
# - "familienarchiv-frontend" (platform: JavaScript / SvelteKit)
# - "familienarchiv-backend" (platform: Java / Spring Boot)
# 5. Copy each project's DSN from Settings → Projects → <project> → Client Keys
# 6. Wire the DSNs into the backend and frontend via env vars (separate issue)
```
--- ---

View File

@@ -1,122 +0,0 @@
# ADR 014 — Pin actions/upload-artifact to v3 (Gitea act_runner v4 protocol incompatibility)
**Status:** Accepted
**Date:** 2026-05-14
**Issues:** [#557 — re-regression](https://git.raddatz.cloud/marcel/familienarchiv/issues/557) · [#14 — original incident](https://git.raddatz.cloud/marcel/familienarchiv/issues/14)
---
## Context
`actions/upload-artifact` is available in two incompatible major versions. The v4 client
uploads via a GitHub-specific artifact API that is **not implemented** in Gitea's
`act_runner` (the self-hosted CI substrate established by ADR-011). When a workflow step
uses `actions/upload-artifact@v4` on this runner, `act_runner` returns a non-zero exit
code from the v4 client even when all tests pass, producing:
> green test suite — red job status — no artifact uploaded
The failure lands in the upload step, _after_ the test output, making it hard to diagnose
from the build log.
### Incident history
| Date | Commit | Event |
|---|---|---|
| 2026-03-19 | `9f3f022e` | Original downgrade: `upload-artifact@v4 → v3` |
| 2026-03-19 | `4142c7cd` | Rationale committed; closes #14 |
| 2026-05-05 | `410b91e2` | Re-regression: upgraded back to v4 without referencing #14 |
| 2026-05-14 | this PR | Second downgrade + ADR + grep guard |
The root cause of the re-regression was institutional-memory failure: the original
rationale was captured only in a commit body, invisible at the point of change (the
`uses:` line). This ADR, the inline comments, and the grep guard are the three
defence layers that replace that missing breadcrumb.
---
## Decision
**Pin all `actions/upload-artifact` and `actions/download-artifact` call sites to `@v3`.**
Both action families share the same v4 protocol incompatibility with `act_runner`.
Pinning to the major tag (`@v3`) keeps us on the latest v3 patch without Renovate noise.
Three call sites are pinned:
- `.gitea/workflows/ci.yml` — "Upload coverage reports" step
- `.gitea/workflows/ci.yml` — "Upload screenshots" step
- `.gitea/workflows/coverage-flake-probe.yml` — "Upload coverage log on failure" step
Each pinned `uses:` line carries a load-bearing inline comment:
```yaml
# Gitea Actions (act_runner) does not implement upload-artifact v4 protocol — pinned per ADR-014. Do NOT upgrade. See #557.
- uses: actions/upload-artifact@v3
```
A CI grep guard enforces the constraint automatically (see below).
---
## Consequences
### Enforcement layers (defence in depth)
1. **Inline comments** on every `uses:` line — visible at the point of change.
2. **CI grep guard** in `.gitea/workflows/ci.yml` ("Assert no (upload|download)-artifact
past v3") — fails the build if a future commit re-introduces `@v4` or higher on any
workflow file. Anchored to YAML `uses:` lines to avoid false positives on embedded
shell strings. Includes a self-test that proves the regex catches v4+ before scanning
the repo.
3. **This ADR** — canonical rationale; cross-referenced by comments and guard message.
### How to spot the symptom
- Test suite output shows green (vitest, surefire, pytest all exit 0)
- CI job status shows red
- Artifacts section of the run is empty
- Build log shows a non-zero exit from the `Upload …` step immediately after green tests
### `@v3` maintenance-mode status
GitHub placed `actions/upload-artifact@v3` in maintenance mode (no new features) but it
has not been removed and carries no known unpatched CVE as of this writing. If GitHub
publishes a v3-specific security advisory, that is an additional trigger to re-evaluate
(see upgrade conditions below).
### When to remove this pin
Re-evaluate pinning **when either condition is met:**
1. `gitea/act_runner` ships a release with v4 artifact protocol support. Track upstream:
<https://gitea.com/gitea/act_runner>
2. `actions/upload-artifact@v3` acquires an unpatched CVE that cannot be mitigated
at the runner level.
When upgrading: remove the grep guard step, update all three `uses:` lines, remove the
inline comments, and update this ADR's status to Superseded.
---
## Alternatives
### SHA pinning (`uses: actions/upload-artifact@<sha>`)
More secure against action repository compromise, but adds Renovate update friction
and is disproportionate for a self-hosted, single-tenant Gitea instance with one
trusted contributor (ADR-011). Rejected.
### Minor/patch pinning (`@v3.4.0`)
Avoids Renovate PRs but freezes us on a specific patch. The v3 major track is in
maintenance mode — minor pinning has no benefit and would require manual updates
for any v3 security patches. Rejected.
### Renovate `packageRules` bypass
Would prevent automated PRs from proposing v4. Not needed while Renovate is not
configured for this repository. Revisit if Renovate is introduced.
### Migrating the runner to a v4-compatible Gitea release
Out of scope for this issue. A separate decision; tracked in #557's non-goals.

View File

@@ -1,69 +0,0 @@
# ADR-015: DooD workspace bind mount for Compose file bind-mount resolution
## Status
Accepted
## Context
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) run job steps inside Docker containers via Docker-out-of-Docker (DooD): the Gitea runner mounts the host Docker socket, and act_runner spawns sibling containers for each job.
When a job step calls `docker compose -f docker-compose.observability.yml up`, Docker Compose resolves relative bind-mount sources against `$(pwd)` inside the job container and passes the resulting absolute paths to the **host** daemon. For example, `./infra/observability/prometheus/prometheus.yml` becomes `/some/path/infra/observability/prometheus/prometheus.yml`, and the host daemon tries to bind-mount that path from the **host filesystem**.
In the default DooD setup (`runner-config.yaml` with only `valid_volumes: ["/var/run/docker.sock"]`), job container workspaces live in the act_runner overlay2 layer. The host has no corresponding directory at the job container's `$(pwd)` path, so the daemon auto-creates an empty directory in its place. The container then fails to start because the mount target was expected to be a file, not a directory:
```
error mounting "…/prometheus/prometheus.yml" to rootfs at "/etc/prometheus/prometheus.yml": not a directory
```
This affected all five config file bind mounts in `docker-compose.observability.yml`.
## Decision
Configure act_runner to store job workspaces on a real host path (`/srv/gitea-workspace`) and mount that path into both the runner container and every job container at the **same absolute path**. The identity of the host path and container path is the key constraint: Compose resolves to an absolute path and hands it to the host daemon, which looks for that exact path on the host filesystem.
**runner-config.yaml changes:**
```yaml
container:
workdir_parent: /srv/gitea-workspace
valid_volumes:
- "/var/run/docker.sock"
- "/srv/gitea-workspace"
options: "-v /srv/gitea-workspace:/srv/gitea-workspace"
```
**Runner compose.yaml change** (host side — not in this repo):
```yaml
runner:
volumes:
- /srv/gitea-workspace:/srv/gitea-workspace
```
With this in place, `$(pwd)` inside a job container resolves to `/srv/gitea-workspace/<owner>/<repo>/`, which is a real directory on the host. Compose-managed bind mounts from that directory work without any additional steps.
## Alternatives Considered
| Alternative | Why rejected |
|---|---|
| **overlay2 `MergedDir` sync via privileged nsenter** (the previous approach, see PR #599 v1) | Required `--privileged --pid=host` (effective root on the host) plus fragile overlay2 driver assumption. Introduced stale-file risk on the host and a second stable path (`/srv/familienarchiv-*/obs-configs`) to maintain separately from the source tree. Replaced by this ADR. |
| **Build configs into a dedicated Docker image** (pattern used for MinIO bootstrap, see `infra/minio/Dockerfile`) | Viable for static files that change infrequently. Requires a build step and an image rebuild every time a config changes. Appropriate for bootstrap scripts; too heavy for frequently-tuned observability configs. |
| **Add workspace directory to runner-config `valid_volumes` only** (without `workdir_parent`) | `valid_volumes` whitelists paths that workflow steps may reference, but does not change where act_runner stores workspaces. Without `workdir_parent`, the workspace would still be in overlay2 and the bind-mount resolution problem would remain. |
| **Map workspace under a different host path than container path** (e.g. host `/srv/workspace`, container `/workspace`) | Compose resolves to the container-internal path (e.g. `/workspace/…`) and passes that to the host daemon. The host daemon interprets the source as a host path. If host `/workspace` does not exist, the daemon creates an empty directory — the original bug. The paths must be identical. |
## Consequences
- `/srv/gitea-workspace` must exist on the VPS before the runner starts. The directory was created as part of this change; it is not created automatically.
- The runner container's `compose.yaml` (maintained outside this repo at `~/docker/gitea/compose.yaml` on the VPS) must include the `- /srv/gitea-workspace:/srv/gitea-workspace` volume line. This is an out-of-band operational dependency; the prerequisite is documented in `runner-config.yaml`.
- `workdir_parent` applies to all jobs on this runner. Any future workflow that calls `docker compose` with relative bind mounts benefits automatically without further configuration.
- Job workspaces persist across runs under `/srv/gitea-workspace`. act_runner manages per-run subdirectory cleanup. Orphaned directories from interrupted runs should be cleaned up manually if disk space becomes a concern.
- Workflows that previously relied on `OBS_CONFIG_DIR` env var or the `obs-configs` stable path on the host no longer need those. Both were removed in this PR.
- This pattern does **not** apply to the `nsenter`-based Caddy reload step (ADR-012), which manages a host systemd service — a different problem class with no bind-mount equivalent.
## References
- ADR-011 — single-tenant runner trust model
- ADR-012 — nsenter via privileged container for host service management
- Issue #598 — original observability stack bind-mount failure
- `runner-config.yaml` — `workdir_parent`, `valid_volumes`, `options`

View File

@@ -1,57 +0,0 @@
# ADR-016: Observability stack co-location at `/opt/familienarchiv/` with CI-push config sync
## Status
Accepted
## Context
Issue #601 established that the observability stack must survive Gitea CI workspace wipes between nightly runs. When the nightly job completes, act_runner deletes the job workspace. Any Docker container that bind-mounts a config file from a workspace path (`/srv/gitea-workspace/…/infra/observability/prometheus/prometheus.yml`) then references a path that no longer exists on the host. On the next nightly run, Docker Compose either auto-creates an empty directory in its place (causing the container to fail to start because a file mount receives a directory) or finds a stale file from a previous run if the workspace happened to land at the same path.
ADR-015 solved the workspace bind-mount resolution problem: job workspaces are stored at `/srv/gitea-workspace` so `$(pwd)` inside the job container maps to a real host path. But it did not address persistence: the workspace is still wiped after the job, so bind mounts from workspace-relative paths remain fragile across runs.
### Decision drivers
1. Bind-mount sources must point to a host path that persists indefinitely, not to a path that disappears after each CI run.
2. Config files must reflect the committed state of the repo after every nightly run (no manual sync steps).
3. Secrets must not be written to the workspace or to any path managed by CI; they must survive independently of deployments.
4. The solution must not introduce new infrastructure dependencies (no SSH access from CI, no external registry, no additional server-side daemon).
### Alternatives considered
**A: Server-pull model** — a systemd timer or cron job on the server does `git pull` from the repo into `/opt/familienarchiv/` and then runs `docker compose up`. Rejected because: (1) requires git credentials on the server and a registered deploy key, (2) adds a second deployment mechanism that diverges from the CI-push model used for the main app stack, (3) timing coupling — the server pull must complete before CI's health checks run, requiring polling or a webhook.
**B: Separate directory (e.g. `/opt/obs/`)** — keeps obs configs isolated from the app stack. Rejected because: (1) the main app compose files are already in `/opt/familienarchiv/` (managed the same way), and (2) GlitchTip shares the `archive-db` PostgreSQL instance and `archiv-net` Docker network — it is architecturally part of the same deployment unit, not a separate one. Co-location reflects the actual coupling.
**C: Named Docker configs (Swarm)** — Docker Swarm supports first-class config objects that persist in the cluster. Rejected because the project does not use Swarm and introducing it solely for config persistence is a disproportionate dependency.
## Decision
The observability stack is co-located with the main application deployment at `/opt/familienarchiv/`:
- `docker-compose.observability.yml``/opt/familienarchiv/docker-compose.observability.yml`
- `infra/observability/``/opt/familienarchiv/infra/observability/`
Both the nightly CI job (`nightly.yml`) and the release job (`release.yml`) copy these files from the workspace checkout to `/opt/familienarchiv/` using `cp -r` on every run (CI-push model). Containers always read config from the permanent location; a workspace wipe has no effect on running containers.
Environment variables follow a two-source model:
- `infra/observability/obs.env` (git-tracked, non-secret): all non-sensitive config — host ports, public URLs (`GLITCHTIP_DOMAIN`, `GF_SERVER_ROOT_URL`), and the default `POSTGRES_HOST`. Changes go through PR review. No credentials.
- `/opt/familienarchiv/obs-secrets.env` (CI-written, per-deploy): passwords and secret keys only (`GRAFANA_ADMIN_PASSWORD`, `GLITCHTIP_SECRET_KEY`, `POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_HOST`), injected fresh from Gitea secrets on every nightly and release deploy. Gitea is the single source of truth for secrets — rotating a secret takes effect on the next deploy without manual server action.
Both files are passed explicitly via `--env-file` to every obs compose command (config dry-run and `up`). No `.env` file is read implicitly. The required key inventory is documented in `docs/DEPLOYMENT.md §4`.
The CI runner mounts `/opt/familienarchiv` as a bind mount into job containers (see `runner-config.yaml`). This requires a one-time `mkdir -p /opt/familienarchiv/infra` on the server and a runner restart after updating `runner-config.yaml` (see ADR-015 and `docs/DEPLOYMENT.md §3.1`).
## Consequences
**Positive:**
- Bind-mount sources survive workspace wipes by definition — they are on a persistent host path.
- Config is always in sync with the repo after each nightly run.
- No new infrastructure dependencies; the CI-push model mirrors how the main app stack is deployed.
- Secret rotation requires no manual server action — Gitea secrets are the authoritative store; `obs-secrets.env` is rewritten from scratch on every deploy so a secret change takes effect on the next nightly or release run.
**Negative:**
- `cp -r` does not remove deleted files; a config file removed from the repo persists in `/opt/familienarchiv/infra/observability/` until manually deleted. Acceptable for this project's change frequency. A `rsync -a --delete` would give a clean mirror if this becomes a problem.
- Mounting `/opt/familienarchiv/` into CI job containers expands the blast radius of a compromised workflow step — a malicious step could overwrite app compose files and Caddy config. Acceptable because the runner is single-tenant (trusted code only). See `runner-config.yaml` security comment.
- Runner must be restarted (`systemctl restart gitea-runner`) after any change to `runner-config.yaml` for the new mount to take effect.

View File

@@ -17,19 +17,6 @@ System_Boundary(archiv, "Familienarchiv (Docker Compose)") {
Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.") Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.")
} }
System_Boundary(observability, "Observability Stack (/opt/familienarchiv/docker-compose.observability.yml)") {
Container(prometheus, "Prometheus", "prom/prometheus:v3.4.0", "Scrapes metrics from backend management port 8081 (/actuator/prometheus), node-exporter, and cAdvisor. Retention: 30 days.")
Container(node_exporter, "Node Exporter", "prom/node-exporter:v1.9.0", "Host-level CPU, memory, disk, and network metrics.")
Container(cadvisor, "cAdvisor", "gcr.io/cadvisor/cadvisor:v0.52.1", "Per-container resource metrics.")
Container(loki, "Loki", "grafana/loki:3.4.2", "Stores log streams from all containers.")
Container(promtail, "Promtail", "grafana/promtail:3.4.2", "Ships Docker container logs to Loki via Docker SD.")
Container(tempo, "Tempo", "grafana/tempo:2.7.2", "Distributed trace storage. OTLP gRPC receiver on port 4317 (archiv-net). Grafana queries traces on port 3200 (obs-net). All ports internal only.")
Container(grafana, "Grafana", "grafana/grafana-oss:11.6.1", "Unified observability UI — dashboards, logs, traces. Datasources (Prometheus, Loki, Tempo) and three dashboards are auto-provisioned.")
Container(glitchtip, "GlitchTip", "glitchtip/glitchtip:6.1.6", "Sentry-compatible error tracker — web process. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces.")
Container(obs_glitchtip_worker, "GlitchTip Worker", "glitchtip/glitchtip:6.1.6", "Celery + beat worker — async event ingestion, notifications, cleanup.")
Container(obs_redis, "Redis", "redis:7-alpine", "Celery task queue for GlitchTip async workers.")
}
Rel(user, caddy, "HTTPS", "TLS 1.2/1.3") Rel(user, caddy, "HTTPS", "TLS 1.2/1.3")
Rel(caddy, frontend, "Reverse proxies non-/api requests", "HTTP / loopback:3000") Rel(caddy, frontend, "Reverse proxies non-/api requests", "HTTP / loopback:3000")
Rel(caddy, backend, "Reverse proxies /api/*", "HTTP / loopback:8080") Rel(caddy, backend, "Reverse proxies /api/*", "HTTP / loopback:8080")
@@ -41,12 +28,5 @@ Rel(backend, ocr, "OCR job requests with presigned MinIO URL", "HTTP / REST / JS
Rel(backend, mail, "Sends notification and password-reset emails (optional)", "SMTP") Rel(backend, mail, "Sends notification and password-reset emails (optional)", "SMTP")
Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned") Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI") Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI")
Rel(promtail, loki, "Pushes log streams", "HTTP/Loki push API")
Rel(backend, tempo, "Sends distributed traces via OTLP", "gRPC / OTLP / port 4317 (archiv-net)")
Rel(grafana, prometheus, "Queries metrics", "HTTP 9090")
Rel(grafana, loki, "Queries logs", "HTTP 3100")
Rel(grafana, tempo, "Queries traces", "HTTP 3200")
Rel(glitchtip, db, "Stores error events in glitchtip DB", "PostgreSQL / archiv-net")
Rel(obs_glitchtip_worker, obs_redis, "Processes Celery tasks", "Redis / obs-net")
@enduml @enduml

View File

@@ -19,39 +19,6 @@ Both containers live in the `gitea_gitea` Docker network on the VPS. The runner
The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps. The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps.
### Workspace bind-mount setup (DooD path resolution)
When a workflow step calls `docker compose up` with relative bind-mount sources (e.g. `./infra/observability/prometheus/prometheus.yml`), Compose resolves them against `$(pwd)` inside the job container and passes the resulting **absolute path** to the host Docker daemon. The host daemon then tries to bind-mount that path from the **host filesystem**.
In the default DooD setup the job container's workspace lives in the act_runner overlay2 layer — the host has no directory at that path, auto-creates an empty one, and the container fails with:
```
error mounting "…/prometheus/prometheus.yml" to rootfs at "/etc/prometheus/prometheus.yml": not a directory
```
**Solution (ADR-015):** store job workspaces on a real host path and mount it at the **same absolute path** inside the runner and every job container. `runner-config.yaml` configures this via `workdir_parent`, `valid_volumes`, and `options`.
**One-time host setup** (required on any fresh VPS):
```bash
mkdir -p /srv/gitea-workspace
# Then add to the runner service in ~/docker/gitea/compose.yaml:
# volumes:
# - /srv/gitea-workspace:/srv/gitea-workspace
# Restart the runner container for the change to take effect.
```
The path `/srv/gitea-workspace` is the canonical workspace root. It must be identical on the host and inside job containers — if the paths differ, Compose still resolves to the container-internal path, which the host daemon cannot find (the original bug).
**Disk management:** act_runner cleans per-run subdirectories on completion. Orphaned directories from interrupted runs accumulate under `/srv/gitea-workspace` and should be pruned manually if disk space becomes a concern:
```bash
# List workspace directories older than 7 days
find /srv/gitea-workspace -mindepth 3 -maxdepth 3 -type d -mtime +7
```
---
### Running host-level commands from CI (nsenter pattern) ### Running host-level commands from CI (nsenter pattern)
Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace: Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace:
@@ -141,33 +108,6 @@ nsenter: failed to execute /bin/systemctl: No such file or directory
The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step. The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step.
**Failure mode 4 — workspace bind-mount not configured (observability stack or any compose-with-file-mounts job)**
Symptom in CI log:
```
Error response from daemon: error while creating mount source path "…/prometheus/prometheus.yml": mkdir …: not a directory
```
Or the service starts but immediately crashes because a config file was mounted as an empty directory.
Cause: `/srv/gitea-workspace` does not exist on the host, or the runner container's `compose.yaml` is missing the `- /srv/gitea-workspace:/srv/gitea-workspace` volume line.
Diagnosis:
```bash
ssh root@<vps>
ls -la /srv/gitea-workspace # must exist and be a directory
docker inspect gitea-runner | grep -A5 Mounts # must show /srv/gitea-workspace
```
Recovery:
```bash
mkdir -p /srv/gitea-workspace
# Add volume line to runner compose.yaml, then:
docker compose -f ~/docker/gitea/compose.yaml up -d gitea-runner
```
See `docs/DEPLOYMENT.md §3.1` and ADR-015 for the full setup rationale.
--- ---
## Gitea vs GitHub Actions Differences ## Gitea vs GitHub Actions Differences
@@ -260,7 +200,7 @@ jobs:
working-directory: frontend working-directory: frontend
- name: Upload screenshots - name: Upload screenshots
if: always() if: always()
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade. uses: actions/upload-artifact@v4 # ← upgraded from v3
with: with:
name: unit-test-screenshots name: unit-test-screenshots
path: frontend/test-results/screenshots/ path: frontend/test-results/screenshots/
@@ -287,7 +227,7 @@ jobs:
working-directory: backend working-directory: backend
- name: Upload test results - name: Upload test results
if: always() if: always()
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade. uses: actions/upload-artifact@v4 # ← upgraded from v3
with: with:
name: backend-test-results name: backend-test-results
path: backend/target/surefire-reports/ path: backend/target/surefire-reports/
@@ -389,7 +329,7 @@ jobs:
E2E_BACKEND_URL: http://localhost:8080 E2E_BACKEND_URL: http://localhost:8080
- name: Upload E2E results - name: Upload E2E results
if: always() if: always()
uses: actions/upload-artifact@v3 # pinned per ADR-014 — Gitea Actions does not implement v4 protocol. Do NOT upgrade. uses: actions/upload-artifact@v4 # ← upgraded from v3
with: with:
name: e2e-results name: e2e-results
path: frontend/test-results/e2e/ path: frontend/test-results/e2e/

View File

@@ -12,11 +12,11 @@ The original spec in this doc proposed an overlay pattern (`docker compose -f do
--- ---
## Observability stack ## Observability stack — not yet deployed
The observability stack (Prometheus, Loki, Grafana, Tempo, GlitchTip) ships as a separate `docker-compose.observability.yml` alongside the main stack. Configuration lives under `infra/observability/`. Prometheus, Loki, Grafana, Alertmanager, Uptime Kuma, GlitchTip and ntfy are **not** part of the production deployment that #497 landed. They are tracked as follow-up issue #498.
→ See [docs/DEPLOYMENT.md §4](../DEPLOYMENT.md#4-logs--observability) for the full setup procedure, service URLs, first-run steps, and env var reference. When that lands the observability containers will join `docker-compose.prod.yml` under a dedicated profile so they can be operated alongside the application stack without affecting the application containers' restart cycle.
--- ---

View File

@@ -165,7 +165,7 @@ npm run check # svelte-check (type checking)
```bash ```bash
npm run test # Vitest unit + server tests (headless) npm run test # Vitest unit + server tests (headless)
npm run test:coverage # Coverage report (server + client) npm run test:coverage # Coverage report (server project only)
npm run test:e2e # Playwright E2E tests npm run test:e2e # Playwright E2E tests
npm run test:e2e:headed # Playwright E2E with visible browser npm run test:e2e:headed # Playwright E2E with visible browser
npm run test:e2e:ui # Playwright UI mode npm run test:e2e:ui # Playwright UI mode

View File

@@ -29,6 +29,6 @@ ENV NODE_ENV=production
COPY --from=build /app/build ./build COPY --from=build /app/build ./build
COPY --from=build /app/package.json ./package.json COPY --from=build /app/package.json ./package.json
COPY --from=build /app/package-lock.json ./package-lock.json COPY --from=build /app/package-lock.json ./package-lock.json
RUN npm ci --omit=dev --ignore-scripts RUN npm ci --omit=dev
EXPOSE 3000 EXPOSE 3000
CMD ["node", "build"] CMD ["node", "build"]

View File

@@ -45,6 +45,7 @@ export default defineConfig(
files: ['**/*.svelte', '**/*.svelte.ts', '**/*.svelte.js'], files: ['**/*.svelte', '**/*.svelte.ts', '**/*.svelte.js'],
languageOptions: { languageOptions: {
parserOptions: { parserOptions: {
projectService: true,
extraFileExtensions: ['.svelte'], extraFileExtensions: ['.svelte'],
parser: ts.parser, parser: ts.parser,
svelteConfig svelteConfig

View File

@@ -345,11 +345,8 @@
"admin_system_import_btn_retry": "Erneut starten", "admin_system_import_btn_retry": "Erneut starten",
"admin_system_import_status_idle": "Kein Import gestartet.", "admin_system_import_status_idle": "Kein Import gestartet.",
"admin_system_import_status_running": "Import läuft…", "admin_system_import_status_running": "Import läuft…",
"admin_system_import_status_done": "Import abgeschlossen", "admin_system_import_status_done": "Import abgeschlossen {count} Dokumente verarbeitet.",
"admin_system_import_status_done_label": "Dokumente verarbeitet", "admin_system_import_status_failed": "Fehler: {message}",
"admin_system_import_status_failed": "Import fehlgeschlagen",
"admin_system_import_failed_no_spreadsheet": "Keine Tabellendatei gefunden.",
"admin_system_import_failed_internal": "Interner Fehler beim Import.",
"admin_system_thumbnails_heading": "Thumbnails erzeugen", "admin_system_thumbnails_heading": "Thumbnails erzeugen",
"admin_system_thumbnails_description": "Erzeugt Vorschaubilder für Dokumente ohne Thumbnail (z. B. nach dem Massenimport).", "admin_system_thumbnails_description": "Erzeugt Vorschaubilder für Dokumente ohne Thumbnail (z. B. nach dem Massenimport).",
"admin_system_thumbnails_btn_start": "Thumbnails erzeugen", "admin_system_thumbnails_btn_start": "Thumbnails erzeugen",
@@ -706,8 +703,6 @@
"error_invite_exhausted": "Dieser Einladungslink wurde bereits vollständig verwendet.", "error_invite_exhausted": "Dieser Einladungslink wurde bereits vollständig verwendet.",
"error_invite_revoked": "Dieser Einladungslink wurde deaktiviert.", "error_invite_revoked": "Dieser Einladungslink wurde deaktiviert.",
"error_invite_expired": "Dieser Einladungslink ist abgelaufen.", "error_invite_expired": "Dieser Einladungslink ist abgelaufen.",
"error_group_has_active_invites": "Diese Gruppe kann nicht gelöscht werden, da sie in einer aktiven Einladung verwendet wird.",
"error_group_not_found": "Die angegebene Gruppe existiert nicht.",
"register_heading": "Konto erstellen", "register_heading": "Konto erstellen",
"register_subtext": "Du wurdest eingeladen, dem Familienarchiv beizutreten.", "register_subtext": "Du wurdest eingeladen, dem Familienarchiv beizutreten.",
"register_label_first_name": "Vorname", "register_label_first_name": "Vorname",
@@ -767,9 +762,6 @@
"admin_new_invite_prefill_last": "Nachname vorausfüllen (optional)", "admin_new_invite_prefill_last": "Nachname vorausfüllen (optional)",
"admin_new_invite_prefill_email": "E-Mail vorausfüllen (optional)", "admin_new_invite_prefill_email": "E-Mail vorausfüllen (optional)",
"admin_new_invite_expires": "Ablaufdatum (optional)", "admin_new_invite_expires": "Ablaufdatum (optional)",
"admin_new_invite_groups": "Gruppen (optional)",
"admin_new_invite_no_groups": "Keine Gruppen vorhanden.",
"admin_invite_groups_load_error": "Gruppen konnten nicht geladen werden. Die Einladung kann ohne Gruppenauswahl erstellt werden.",
"admin_invite_created_title": "Einladung erstellt", "admin_invite_created_title": "Einladung erstellt",
"admin_invite_created_desc": "Teile diesen Link mit der einzuladenden Person:", "admin_invite_created_desc": "Teile diesen Link mit der einzuladenden Person:",
"admin_invite_revoke_confirm": "Einladung wirklich widerrufen?", "admin_invite_revoke_confirm": "Einladung wirklich widerrufen?",

View File

@@ -345,11 +345,8 @@
"admin_system_import_btn_retry": "Start again", "admin_system_import_btn_retry": "Start again",
"admin_system_import_status_idle": "No import started.", "admin_system_import_status_idle": "No import started.",
"admin_system_import_status_running": "Import running…", "admin_system_import_status_running": "Import running…",
"admin_system_import_status_done": "Import complete", "admin_system_import_status_done": "Import complete {count} documents processed.",
"admin_system_import_status_done_label": "Documents processed", "admin_system_import_status_failed": "Error: {message}",
"admin_system_import_status_failed": "Import failed",
"admin_system_import_failed_no_spreadsheet": "No spreadsheet file found.",
"admin_system_import_failed_internal": "Import failed due to an internal error.",
"admin_system_thumbnails_heading": "Generate thumbnails", "admin_system_thumbnails_heading": "Generate thumbnails",
"admin_system_thumbnails_description": "Generates preview images for documents without a thumbnail (e.g. after the mass import).", "admin_system_thumbnails_description": "Generates preview images for documents without a thumbnail (e.g. after the mass import).",
"admin_system_thumbnails_btn_start": "Generate thumbnails", "admin_system_thumbnails_btn_start": "Generate thumbnails",
@@ -706,8 +703,6 @@
"error_invite_exhausted": "This invite link has already been fully used.", "error_invite_exhausted": "This invite link has already been fully used.",
"error_invite_revoked": "This invite link has been deactivated.", "error_invite_revoked": "This invite link has been deactivated.",
"error_invite_expired": "This invite link has expired.", "error_invite_expired": "This invite link has expired.",
"error_group_has_active_invites": "This group cannot be deleted because it is referenced by one or more active invite links.",
"error_group_not_found": "The specified group does not exist.",
"register_heading": "Create account", "register_heading": "Create account",
"register_subtext": "You've been invited to join Familienarchiv.", "register_subtext": "You've been invited to join Familienarchiv.",
"register_label_first_name": "First name", "register_label_first_name": "First name",
@@ -767,9 +762,6 @@
"admin_new_invite_prefill_last": "Pre-fill last name (optional)", "admin_new_invite_prefill_last": "Pre-fill last name (optional)",
"admin_new_invite_prefill_email": "Pre-fill email (optional)", "admin_new_invite_prefill_email": "Pre-fill email (optional)",
"admin_new_invite_expires": "Expiry date (optional)", "admin_new_invite_expires": "Expiry date (optional)",
"admin_new_invite_groups": "Groups (optional)",
"admin_new_invite_no_groups": "No groups exist.",
"admin_invite_groups_load_error": "Groups could not be loaded. The invite can still be created without group assignment.",
"admin_invite_created_title": "Invite created", "admin_invite_created_title": "Invite created",
"admin_invite_created_desc": "Share this link with the person you are inviting:", "admin_invite_created_desc": "Share this link with the person you are inviting:",
"admin_invite_revoke_confirm": "Really revoke this invite?", "admin_invite_revoke_confirm": "Really revoke this invite?",

View File

@@ -345,11 +345,8 @@
"admin_system_import_btn_retry": "Iniciar de nuevo", "admin_system_import_btn_retry": "Iniciar de nuevo",
"admin_system_import_status_idle": "No hay importación iniciada.", "admin_system_import_status_idle": "No hay importación iniciada.",
"admin_system_import_status_running": "Importación en curso…", "admin_system_import_status_running": "Importación en curso…",
"admin_system_import_status_done": "Importación completada", "admin_system_import_status_done": "Importación completada {count} documentos procesados.",
"admin_system_import_status_done_label": "Documentos procesados", "admin_system_import_status_failed": "Error: {message}",
"admin_system_import_status_failed": "Importación fallida",
"admin_system_import_failed_no_spreadsheet": "No se encontró ninguna hoja de cálculo.",
"admin_system_import_failed_internal": "Error interno durante la importación.",
"admin_system_thumbnails_heading": "Generar miniaturas", "admin_system_thumbnails_heading": "Generar miniaturas",
"admin_system_thumbnails_description": "Genera imágenes de vista previa para documentos sin miniatura (p. ej. tras la importación masiva).", "admin_system_thumbnails_description": "Genera imágenes de vista previa para documentos sin miniatura (p. ej. tras la importación masiva).",
"admin_system_thumbnails_btn_start": "Generar miniaturas", "admin_system_thumbnails_btn_start": "Generar miniaturas",
@@ -706,8 +703,6 @@
"error_invite_exhausted": "Este enlace de invitación ya ha sido completamente utilizado.", "error_invite_exhausted": "Este enlace de invitación ya ha sido completamente utilizado.",
"error_invite_revoked": "Este enlace de invitación ha sido desactivado.", "error_invite_revoked": "Este enlace de invitación ha sido desactivado.",
"error_invite_expired": "Este enlace de invitación ha expirado.", "error_invite_expired": "Este enlace de invitación ha expirado.",
"error_group_has_active_invites": "Este grupo no puede eliminarse porque está referenciado por uno o más enlaces de invitación activos.",
"error_group_not_found": "El grupo especificado no existe.",
"register_heading": "Crear cuenta", "register_heading": "Crear cuenta",
"register_subtext": "Has sido invitado a unirte al Familienarchiv.", "register_subtext": "Has sido invitado a unirte al Familienarchiv.",
"register_label_first_name": "Nombre", "register_label_first_name": "Nombre",
@@ -767,9 +762,6 @@
"admin_new_invite_prefill_last": "Prellenar apellido (opcional)", "admin_new_invite_prefill_last": "Prellenar apellido (opcional)",
"admin_new_invite_prefill_email": "Prellenar correo (opcional)", "admin_new_invite_prefill_email": "Prellenar correo (opcional)",
"admin_new_invite_expires": "Fecha de vencimiento (opcional)", "admin_new_invite_expires": "Fecha de vencimiento (opcional)",
"admin_new_invite_groups": "Grupos (opcional)",
"admin_new_invite_no_groups": "No hay grupos disponibles.",
"admin_invite_groups_load_error": "No se pudieron cargar los grupos. La invitación puede crearse sin asignar grupos.",
"admin_invite_created_title": "Invitación creada", "admin_invite_created_title": "Invitación creada",
"admin_invite_created_desc": "Comparte este enlace con la persona invitada:", "admin_invite_created_desc": "Comparte este enlace con la persona invitada:",
"admin_invite_revoke_confirm": "¿Realmente revocar esta invitación?", "admin_invite_revoke_confirm": "¿Realmente revocar esta invitación?",

File diff suppressed because it is too large Load Diff

View File

@@ -16,14 +16,13 @@
"lint:boundary-demo": "eslint src/lib/tag/__fixtures__/", "lint:boundary-demo": "eslint src/lib/tag/__fixtures__/",
"test:unit": "vitest", "test:unit": "vitest",
"test": "npm run test:unit -- --run", "test": "npm run test:unit -- --run",
"test:coverage": "vitest run --coverage --project=server; vitest run -c vitest.client-coverage.config.ts --coverage", "test:coverage": "vitest run --coverage --project=server && vitest run -c vitest.client-coverage.config.ts --coverage",
"test:e2e": "playwright test", "test:e2e": "playwright test",
"test:e2e:headed": "playwright test --headed", "test:e2e:headed": "playwright test --headed",
"test:e2e:ui": "playwright test --ui", "test:e2e:ui": "playwright test --ui",
"generate:api": "openapi-typescript http://localhost:8080/v3/api-docs -o ./src/lib/generated/api.ts" "generate:api": "openapi-typescript http://localhost:8080/v3/api-docs -o ./src/lib/generated/api.ts"
}, },
"dependencies": { "dependencies": {
"@sentry/sveltekit": "^10.53.1",
"@tiptap/core": "3.22.5", "@tiptap/core": "3.22.5",
"@tiptap/extension-mention": "3.22.5", "@tiptap/extension-mention": "3.22.5",
"@tiptap/starter-kit": "3.22.5", "@tiptap/starter-kit": "3.22.5",

View File

@@ -1,10 +0,0 @@
import * as Sentry from '@sentry/sveltekit';
Sentry.init({
dsn: import.meta.env.VITE_SENTRY_DSN,
environment: import.meta.env.MODE,
tracesSampleRate: 1.0,
enabled: !!import.meta.env.VITE_SENTRY_DSN
});
export const handleError = Sentry.handleErrorWithSentry();

View File

@@ -1,4 +1,3 @@
import * as Sentry from '@sentry/sveltekit';
import { redirect, type Handle, type HandleFetch } from '@sveltejs/kit'; import { redirect, type Handle, type HandleFetch } from '@sveltejs/kit';
import { paraglideMiddleware } from '$lib/paraglide/server'; import { paraglideMiddleware } from '$lib/paraglide/server';
import { sequence } from '@sveltejs/kit/hooks'; import { sequence } from '@sveltejs/kit/hooks';
@@ -6,13 +5,6 @@ import { env } from 'process';
import { cookieName, cookieMaxAge } from '$lib/paraglide/runtime'; import { cookieName, cookieMaxAge } from '$lib/paraglide/runtime';
import { detectLocale } from '$lib/shared/server/locale'; import { detectLocale } from '$lib/shared/server/locale';
Sentry.init({
dsn: import.meta.env.VITE_SENTRY_DSN,
environment: import.meta.env.MODE,
tracesSampleRate: 1.0,
enabled: !!import.meta.env.VITE_SENTRY_DSN
});
const PUBLIC_PATHS = [ const PUBLIC_PATHS = [
'/login', '/login',
'/logout', '/logout',
@@ -121,5 +113,3 @@ export const handleFetch: HandleFetch = async ({ event, request, fetch }) => {
}; };
export const handle = sequence(userGroup, handleAuth, handleLocaleDetection, handleParaglide); export const handle = sequence(userGroup, handleAuth, handleLocaleDetection, handleParaglide);
export const handleError = Sentry.handleErrorWithSentry();

View File

@@ -22,8 +22,6 @@ export type ErrorCode =
| 'INVITE_EXHAUSTED' | 'INVITE_EXHAUSTED'
| 'INVITE_REVOKED' | 'INVITE_REVOKED'
| 'INVITE_EXPIRED' | 'INVITE_EXPIRED'
| 'GROUP_HAS_ACTIVE_INVITES'
| 'GROUP_NOT_FOUND'
| 'ANNOTATION_NOT_FOUND' | 'ANNOTATION_NOT_FOUND'
| 'ANNOTATION_UPDATE_FAILED' | 'ANNOTATION_UPDATE_FAILED'
| 'TRANSCRIPTION_BLOCK_NOT_FOUND' | 'TRANSCRIPTION_BLOCK_NOT_FOUND'
@@ -110,10 +108,6 @@ export function getErrorMessage(code: ErrorCode | string | undefined): string {
return m.error_invite_revoked(); return m.error_invite_revoked();
case 'INVITE_EXPIRED': case 'INVITE_EXPIRED':
return m.error_invite_expired(); return m.error_invite_expired();
case 'GROUP_HAS_ACTIVE_INVITES':
return m.error_group_has_active_invites();
case 'GROUP_NOT_FOUND':
return m.error_group_not_found();
case 'ANNOTATION_NOT_FOUND': case 'ANNOTATION_NOT_FOUND':
return m.error_annotation_not_found(); return m.error_annotation_not_found();
case 'ANNOTATION_UPDATE_FAILED': case 'ANNOTATION_UPDATE_FAILED':

View File

@@ -1,7 +1,4 @@
<script lang="ts"> <script lang="ts">
import { untrack } from 'svelte';
import { m } from '$lib/paraglide/messages.js';
let { let {
groups, groups,
selectedGroupIds = [] selectedGroupIds = []
@@ -10,13 +7,12 @@ let {
selectedGroupIds?: string[]; selectedGroupIds?: string[];
} = $props(); } = $props();
let selected = $state<string[]>(untrack(() => [...selectedGroupIds])); let selected = $derived([...selectedGroupIds]);
</script> </script>
<fieldset class="flex flex-wrap gap-3 border-none p-0"> <div class="flex flex-wrap gap-3">
<legend class="sr-only">{m.admin_new_invite_groups()}</legend>
{#each groups as group (group.id)} {#each groups as group (group.id)}
<label class="inline-flex min-h-[44px] items-center gap-2 text-sm text-ink-2"> <label class="inline-flex items-center gap-2 text-sm text-ink-2">
<input <input
type="checkbox" type="checkbox"
name="groupIds" name="groupIds"
@@ -27,4 +23,4 @@ let selected = $state<string[]>(untrack(() => [...selectedGroupIds]));
{group.name} {group.name}
</label> </label>
{/each} {/each}
</fieldset> </div>

View File

@@ -1,8 +1,7 @@
<script lang="ts"> <script lang="ts">
import { enhance } from '$app/forms'; import { enhance } from '$app/forms';
import { beforeNavigate, goto } from '$app/navigation';
import { m } from '$lib/paraglide/messages.js'; import { m } from '$lib/paraglide/messages.js';
import { createUnsavedWarning } from '$lib/shared/hooks/useUnsavedWarning.svelte';
import UnsavedWarningBanner from '$lib/shared/primitives/UnsavedWarningBanner.svelte';
const availableStandard = $derived([ const availableStandard = $derived([
{ value: 'READ_ALL', label: m.admin_perm_read_all() }, { value: 'READ_ALL', label: m.admin_perm_read_all() },
@@ -19,7 +18,17 @@ const availableAdmin = $derived([
let { form } = $props(); let { form } = $props();
const unsaved = createUnsavedWarning(); let isDirty = $state(false);
let showUnsavedWarning = $state(false);
let discardTarget: string | null = $state(null);
beforeNavigate(({ cancel, to }) => {
if (isDirty) {
cancel();
showUnsavedWarning = true;
discardTarget = to?.url.href ?? null;
}
});
</script> </script>
<div class="flex flex-1 flex-col overflow-hidden"> <div class="flex flex-1 flex-col overflow-hidden">
@@ -49,8 +58,23 @@ const unsaved = createUnsavedWarning();
<!-- Scrollable body --> <!-- Scrollable body -->
<div class="flex-1 overflow-y-auto px-5 py-5"> <div class="flex-1 overflow-y-auto px-5 py-5">
{#if unsaved.showUnsavedWarning} {#if showUnsavedWarning}
<UnsavedWarningBanner onDiscard={unsaved.discard} /> <div
class="mb-5 flex items-center justify-between rounded border border-amber-200 bg-amber-50 p-3 text-sm text-amber-800 dark:border-amber-800 dark:bg-amber-950/40 dark:text-amber-300"
>
<span>{m.admin_unsaved_warning()}</span>
<button
type="button"
onclick={() => {
isDirty = false;
showUnsavedWarning = false;
if (discardTarget) goto(discardTarget);
}}
class="ml-4 shrink-0 font-sans text-xs font-bold tracking-widest text-amber-800 uppercase hover:text-amber-900 dark:text-amber-300"
>
{m.person_discard_changes()}
</button>
</div>
{/if} {/if}
{#if form?.error} {#if form?.error}
<div class="mb-5 rounded border border-red-200 bg-red-50 p-3 text-sm text-red-700"> <div class="mb-5 rounded border border-red-200 bg-red-50 p-3 text-sm text-red-700">
@@ -61,11 +85,11 @@ const unsaved = createUnsavedWarning();
<form <form
id="new-group-form" id="new-group-form"
method="POST" method="POST"
use:enhance={() => async ({ result, update }) => { use:enhance
if (result.type === 'redirect') unsaved.clearOnSuccess(); oninput={() => {
await update(); isDirty = true;
showUnsavedWarning = false;
}} }}
oninput={unsaved.markDirty}
class="space-y-5" class="space-y-5"
> >
<!-- Name card --> <!-- Name card -->

View File

@@ -1,125 +0,0 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser';
import Page from './+page.svelte';
const enhanceCaptureRef = vi.hoisted(() => ({ submitFn: undefined as unknown }));
vi.mock('$app/forms', () => ({
enhance: (_el: HTMLFormElement, fn?: unknown) => {
enhanceCaptureRef.submitFn = fn;
return { destroy: vi.fn() };
}
}));
vi.mock('$app/navigation', () => ({ beforeNavigate: vi.fn(), goto: vi.fn() }));
import { beforeNavigate, goto } from '$app/navigation';
afterEach(cleanup);
type SubmitFn = () => Promise<
(opts: {
result: { type: string; [key: string]: unknown };
update: () => Promise<void>;
}) => Promise<void>
>;
// ─── Unsaved-changes guard ────────────────────────────────────────────────────
describe('Admin new group page unsaved-changes guard', () => {
beforeEach(() => {
vi.clearAllMocks();
enhanceCaptureRef.submitFn = undefined;
});
it('does not show unsaved warning initially', async () => {
render(Page, { props: { form: null } });
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
});
it('cancels navigation and shows banner when form is dirty', async () => {
render(Page, { props: { form: null } });
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
document
.querySelector<HTMLInputElement>('input[name="name"]')!
.dispatchEvent(new InputEvent('input', { bubbles: true }));
const cancel = vi.fn();
callback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
expect(cancel).toHaveBeenCalled();
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
});
it('does not cancel navigation when form is clean', async () => {
render(Page, { props: { form: null } });
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
const cancel = vi.fn();
callback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
expect(cancel).not.toHaveBeenCalled();
});
it('discard button calls goto with the target URL', async () => {
render(Page, { props: { form: null } });
const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
document
.querySelector<HTMLInputElement>('input[name="name"]')!
.dispatchEvent(new InputEvent('input', { bubbles: true }));
callback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/groups') } });
await page.getByRole('button', { name: /verwerfen/i }).click();
expect(vi.mocked(goto)).toHaveBeenCalledWith('http://localhost/admin/groups');
});
it('clears banner when enhance callback receives a redirect result', async () => {
render(Page, { props: { form: null } });
const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
document
.querySelector<HTMLInputElement>('input[name="name"]')!
.dispatchEvent(new InputEvent('input', { bubbles: true }));
navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/groups') } });
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
await innerFn({
result: { type: 'redirect', location: '/admin/groups', status: 303 },
update: vi.fn().mockResolvedValue(undefined)
});
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
const cancel = vi.fn();
navCallback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
expect(cancel).not.toHaveBeenCalled();
});
it('keeps banner when enhance callback receives a failure result', async () => {
render(Page, { props: { form: null } });
const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
document
.querySelector<HTMLInputElement>('input[name="name"]')!
.dispatchEvent(new InputEvent('input', { bubbles: true }));
navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/groups') } });
await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
await innerFn({
result: { type: 'failure', status: 400, data: { error: 'Name bereits vergeben' } },
update: vi.fn().mockResolvedValue(undefined)
});
const cancel = vi.fn();
navCallback({ cancel, to: { url: new URL('http://localhost/admin/groups') } });
expect(cancel).toHaveBeenCalled();
});
});

View File

@@ -2,7 +2,6 @@ import { fail } from '@sveltejs/kit';
import { env } from '$env/dynamic/private'; import { env } from '$env/dynamic/private';
import { parseBackendError } from '$lib/shared/errors'; import { parseBackendError } from '$lib/shared/errors';
import type { Actions, PageServerLoad } from './$types'; import type { Actions, PageServerLoad } from './$types';
import type { components } from '$lib/generated/api';
export interface InviteListItem { export interface InviteListItem {
id: string; id: string;
@@ -18,37 +17,22 @@ export interface InviteListItem {
shareableUrl: string; shareableUrl: string;
} }
export type UserGroup = components['schemas']['UserGroup'];
export const load: PageServerLoad = async ({ url, fetch }) => { export const load: PageServerLoad = async ({ url, fetch }) => {
const status = url.searchParams.get('status') ?? 'active'; const status = url.searchParams.get('status') ?? 'active';
const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080'; const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
const res = await fetch(`${apiUrl}/api/invites?status=${encodeURIComponent(status)}`);
const [invitesRes, groupsRes] = await Promise.all([ if (!res.ok) {
fetch(`${apiUrl}/api/invites?status=${encodeURIComponent(status)}`), const backendError = await parseBackendError(res);
fetch(`${apiUrl}/api/groups`) return {
]); invites: [] as InviteListItem[],
status,
let invites: InviteListItem[] = []; loadError: backendError?.code ?? 'INTERNAL_ERROR'
let loadError: string | null = null; };
if (!invitesRes.ok) {
const backendError = await parseBackendError(invitesRes);
loadError = backendError?.code ?? 'INTERNAL_ERROR';
} else {
invites = await invitesRes.json();
} }
let groups: UserGroup[] = []; const invites: InviteListItem[] = await res.json();
let groupsLoadError: string | null = null; return { invites, status, loadError: null };
if (!groupsRes.ok) {
const backendError = await parseBackendError(groupsRes);
groupsLoadError = backendError?.code ?? 'INTERNAL_ERROR';
} else {
const raw: UserGroup[] = await groupsRes.json();
groups = [...raw].sort((a, b) => a.name.localeCompare(b.name));
}
return { invites, status, loadError, groups, groupsLoadError };
}; };
export const actions = { export const actions = {
@@ -61,7 +45,6 @@ export const actions = {
const prefillLastName = (formData.get('prefillLastName') as string) || undefined; const prefillLastName = (formData.get('prefillLastName') as string) || undefined;
const prefillEmail = (formData.get('prefillEmail') as string) || undefined; const prefillEmail = (formData.get('prefillEmail') as string) || undefined;
const expiresAt = (formData.get('expiresAt') as string) || undefined; const expiresAt = (formData.get('expiresAt') as string) || undefined;
const groupIds = formData.getAll('groupIds') as string[];
const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080'; const apiUrl = env.API_INTERNAL_URL || 'http://localhost:8080';
const res = await fetch(`${apiUrl}/api/invites`, { const res = await fetch(`${apiUrl}/api/invites`, {
@@ -73,8 +56,7 @@ export const actions = {
prefillFirstName, prefillFirstName,
prefillLastName, prefillLastName,
prefillEmail, prefillEmail,
expiresAt, expiresAt
groupIds
}) })
}); });

View File

@@ -2,8 +2,7 @@
import { enhance } from '$app/forms'; import { enhance } from '$app/forms';
import { m } from '$lib/paraglide/messages.js'; import { m } from '$lib/paraglide/messages.js';
import { getErrorMessage } from '$lib/shared/errors'; import { getErrorMessage } from '$lib/shared/errors';
import UserGroupsSection from '$lib/user/UserGroupsSection.svelte'; import type { InviteListItem } from './+page.server.ts';
import type { InviteListItem, UserGroup } from './+page.server.ts';
let { let {
data, data,
@@ -13,8 +12,6 @@ let {
invites: InviteListItem[]; invites: InviteListItem[];
status: string; status: string;
loadError: string | null; loadError: string | null;
groups: UserGroup[];
groupsLoadError: string | null;
}; };
form?: { form?: {
createError?: string; createError?: string;
@@ -256,23 +253,6 @@ function statusIcon(status: string) {
class="block w-full border border-line px-3 py-2 font-serif text-sm text-ink focus:outline-none focus-visible:ring-2 focus-visible:ring-focus-ring" class="block w-full border border-line px-3 py-2 font-serif text-sm text-ink focus:outline-none focus-visible:ring-2 focus-visible:ring-focus-ring"
/> />
</div> </div>
<div class="sm:col-span-2">
<p class="mb-2 font-sans text-xs font-bold tracking-widest text-ink-3 uppercase">
{m.admin_new_invite_groups()}
</p>
{#if data.groupsLoadError}
<div
role="alert"
class="rounded-sm border border-amber-200 bg-amber-50 px-3 py-2 font-sans text-xs text-amber-700"
>
{m.admin_invite_groups_load_error()}
</div>
{:else if data.groups.length === 0}
<p class="font-sans text-xs text-ink-3 italic">{m.admin_new_invite_no_groups()}</p>
{:else}
<UserGroupsSection groups={data.groups} />
{/if}
</div>
{#if form?.createError} {#if form?.createError}
<div class="font-sans text-xs font-medium text-red-600 sm:col-span-2"> <div class="font-sans text-xs font-medium text-red-600 sm:col-span-2">
{getErrorMessage(form.createError)} {getErrorMessage(form.createError)}

View File

@@ -1,155 +0,0 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
vi.mock('$env/dynamic/private', () => ({
env: { API_INTERNAL_URL: 'http://localhost:8080' }
}));
import { load, actions } from './+page.server';
import type { UserGroup } from './+page.server';
// PageServerLoad annotates the return as `void | (...)`. This explicit shape avoids
// the void and the Record<string, any> from the generic constraint.
type LoadData = {
invites: unknown[];
status: string;
loadError: string | null;
groups: UserGroup[];
groupsLoadError: string | null;
};
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type AnyFetch = (...args: any[]) => any;
function mockResponse(ok: boolean, body: unknown, status = 200) {
return {
ok,
status,
json: async () => body,
text: async () => JSON.stringify(body),
headers: new Headers({ 'content-type': 'application/json' })
} as unknown as Response;
}
describe('admin/invites load()', () => {
const mockFetch = vi.fn<AnyFetch>();
beforeEach(() => mockFetch.mockReset());
function event(status = 'active') {
return {
url: new URL(`http://localhost/admin/invites?status=${status}`),
fetch: mockFetch as unknown as typeof fetch
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any;
}
it('returns groups array alongside invites when both succeed', async () => {
mockFetch.mockResolvedValueOnce(mockResponse(true, [])).mockResolvedValueOnce(
mockResponse(true, [
{ id: 'g-1', name: 'Familie', permissions: ['READ_ALL'] },
{ id: 'g-2', name: 'Administratoren', permissions: ['ADMIN'] }
])
);
const result = (await load(event())) as LoadData;
expect(result.groups).toHaveLength(2);
expect(result.groupsLoadError).toBeNull();
});
it('returns groups sorted alphabetically by name', async () => {
mockFetch.mockResolvedValueOnce(mockResponse(true, [])).mockResolvedValueOnce(
mockResponse(true, [
{ id: 'g-1', name: 'Zebra', permissions: [] },
{ id: 'g-2', name: 'Alfa', permissions: [] },
{ id: 'g-3', name: 'Mitte', permissions: [] }
])
);
const result = (await load(event())) as LoadData;
expect(result.groups.map((g) => g.name)).toEqual(['Alfa', 'Mitte', 'Zebra']);
});
it('returns groups: [] and non-null groupsLoadError when groups fetch is non-OK', async () => {
mockFetch
.mockResolvedValueOnce(mockResponse(true, []))
.mockResolvedValueOnce(mockResponse(false, { code: 'FORBIDDEN' }, 403));
const result = (await load(event())) as LoadData;
expect(result.groups).toEqual([]);
expect(result.groupsLoadError).toBe('FORBIDDEN');
});
it('falls back to INTERNAL_ERROR when groups error body has no code', async () => {
mockFetch
.mockResolvedValueOnce(mockResponse(true, []))
.mockResolvedValueOnce(mockResponse(false, null, 500));
const result = (await load(event())) as LoadData;
expect(result.groupsLoadError).toBe('INTERNAL_ERROR');
});
it('fetches invites and groups in parallel (both URLs called)', async () => {
mockFetch
.mockResolvedValueOnce(mockResponse(true, []))
.mockResolvedValueOnce(mockResponse(true, []));
await load(event());
expect(mockFetch).toHaveBeenCalledTimes(2);
expect(mockFetch).toHaveBeenCalledWith(expect.stringContaining('/api/invites'));
expect(mockFetch).toHaveBeenCalledWith(expect.stringContaining('/api/groups'));
});
});
describe('admin/invites create action', () => {
const mockFetch = vi.fn<AnyFetch>();
beforeEach(() => mockFetch.mockReset());
const successBody = {
id: 'inv-1',
code: 'ABCDE12345',
displayCode: 'ABCDE-12345',
status: 'active',
revoked: false,
useCount: 0,
createdAt: '2026-01-01T00:00:00Z',
shareableUrl: 'http://localhost/register?code=ABCDE12345'
};
it('includes groupIds array in POST body when checkboxes are checked', async () => {
const fd = new FormData();
fd.append('groupIds', 'g-1');
fd.append('groupIds', 'g-2');
mockFetch.mockResolvedValueOnce(mockResponse(true, successBody, 201));
await actions.create({
request: new Request('http://localhost', { method: 'POST', body: fd }),
fetch: mockFetch as unknown as typeof fetch
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any);
const [, init] = mockFetch.mock.calls[0] as [string, RequestInit];
const sent = JSON.parse(init.body as string);
expect(sent.groupIds).toEqual(['g-1', 'g-2']);
});
it('sends groupIds: [] when no checkboxes are checked', async () => {
const fd = new FormData();
mockFetch.mockResolvedValueOnce(mockResponse(true, successBody, 201));
await actions.create({
request: new Request('http://localhost', { method: 'POST', body: fd }),
fetch: mockFetch as unknown as typeof fetch
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any);
const [, init] = mockFetch.mock.calls[0] as [string, RequestInit];
const sent = JSON.parse(init.body as string);
expect(sent.groupIds).toEqual([]);
});
});

View File

@@ -7,15 +7,12 @@ afterEach(cleanup);
const makeInvite = (overrides: Record<string, unknown> = {}) => ({ const makeInvite = (overrides: Record<string, unknown> = {}) => ({
id: 'i-1', id: 'i-1',
code: 'XYZ1234567',
displayCode: 'XYZ-1234', displayCode: 'XYZ-1234',
label: 'Familie', label: 'Familie',
useCount: 0, useCount: 0,
maxUses: 5, maxUses: 5,
expiresAt: '2027-01-01T00:00:00Z', expiresAt: '2027-01-01T00:00:00Z',
revoked: false,
status: 'active' as string, status: 'active' as string,
createdAt: '2025-01-01T00:00:00Z',
shareableUrl: 'http://example.com/i/i-1', shareableUrl: 'http://example.com/i/i-1',
...overrides ...overrides
}); });
@@ -25,15 +22,11 @@ const baseData = (
invites: ReturnType<typeof makeInvite>[]; invites: ReturnType<typeof makeInvite>[];
status: string; status: string;
loadError: string | null; loadError: string | null;
groups: { id: string; name: string; permissions: string[] }[];
groupsLoadError: string | null;
}> = {} }> = {}
) => ({ ) => ({
invites: [], invites: [],
status: 'active', status: 'active',
loadError: null, loadError: null,
groups: [],
groupsLoadError: null,
...overrides ...overrides
}); });
@@ -260,115 +253,4 @@ describe('admin/invites page', () => {
const banner = document.querySelector('.bg-red-50'); const banner = document.querySelector('.bg-red-50');
expect(banner).not.toBeNull(); expect(banner).not.toBeNull();
}); });
// ─── groups section ───────────────────────────────────────────────────────
it('shows a groups-load warning banner when data.groupsLoadError is set', async () => {
render(AdminInvitesPage, {
props: { data: { ...baseData(), groups: [], groupsLoadError: 'INTERNAL_ERROR' } }
});
await page
.getByRole('button', { name: /neue einladung/i })
.first()
.click();
const banner = document.querySelector('.bg-amber-50');
expect(banner).not.toBeNull();
});
it('renders group checkboxes inside the new-invite form when groups are provided', async () => {
render(AdminInvitesPage, {
props: {
data: {
...baseData(),
groups: [
{ id: 'g-1', name: 'Administratoren', permissions: ['ADMIN'] },
{ id: 'g-2', name: 'Familie', permissions: ['READ_ALL'] }
],
groupsLoadError: null
}
}
});
await page
.getByRole('button', { name: /neue einladung/i })
.first()
.click();
await expect.element(page.getByRole('checkbox', { name: 'Administratoren' })).toBeVisible();
await expect.element(page.getByRole('checkbox', { name: 'Familie' })).toBeVisible();
});
it('group checkbox stays checked after being clicked', async () => {
render(AdminInvitesPage, {
props: {
data: {
...baseData(),
groups: [{ id: 'g-1', name: 'Familie', permissions: ['READ_ALL'] }],
groupsLoadError: null
}
}
});
await page
.getByRole('button', { name: /neue einladung/i })
.first()
.click();
const checkbox = page.getByRole('checkbox', { name: 'Familie' });
await checkbox.click();
await expect.element(checkbox).toBeChecked();
});
it('amber warning banner has role="alert"', async () => {
render(AdminInvitesPage, {
props: { data: { ...baseData(), groups: [], groupsLoadError: 'INTERNAL_ERROR' } }
});
await page
.getByRole('button', { name: /neue einladung/i })
.first()
.click();
const alert = document.querySelector('[role="alert"]');
expect(alert).not.toBeNull();
});
it('checkbox group fieldset has accessible name from i18n key (not hardcoded German)', async () => {
render(AdminInvitesPage, {
props: {
data: {
...baseData(),
groups: [{ id: 'g-1', name: 'Familie', permissions: ['READ_ALL'] }],
groupsLoadError: null
}
}
});
await page
.getByRole('button', { name: /neue einladung/i })
.first()
.click();
// m.admin_new_invite_groups() returns "Gruppen (optional)" in de locale
// The hardcoded legend "Gruppen" would not match this accessible name
await expect.element(page.getByRole('group', { name: 'Gruppen (optional)' })).toBeVisible();
});
it('shows no checkboxes and no warning when groups list is empty and no error', async () => {
render(AdminInvitesPage, {
props: { data: { ...baseData(), groups: [], groupsLoadError: null } }
});
await page
.getByRole('button', { name: /neue einladung/i })
.first()
.click();
expect(document.querySelectorAll('input[name="groupIds"]')).toHaveLength(0);
expect(document.querySelector('.bg-amber-50')).toBeNull();
// empty-state message visible — "Keine Gruppen vorhanden." in de locale
await expect.element(page.getByText(/keine gruppen/i)).toBeVisible();
});
}); });

View File

@@ -1,14 +1,19 @@
<script lang="ts"> <script lang="ts">
import { onDestroy } from 'svelte'; import { onDestroy } from 'svelte';
import { m } from '$lib/paraglide/messages.js'; import { m } from '$lib/paraglide/messages.js';
import ImportStatusCard from './ImportStatusCard.svelte';
import type { ImportStatus } from './types.js';
let backfillResult: number | null = $state(null); let backfillResult: number | null = $state(null);
let backfillLoading = $state(false); let backfillLoading = $state(false);
let backfillHashesResult: number | null = $state(null); let backfillHashesResult: number | null = $state(null);
let backfillHashesLoading = $state(false); let backfillHashesLoading = $state(false);
type ImportStatus = {
state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED';
message: string;
processed: number;
startedAt: string | null;
};
type ThumbnailStatus = { type ThumbnailStatus = {
state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED'; state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED';
message: string; message: string;
@@ -172,7 +177,47 @@ async function backfillFileHashes() {
</div> </div>
<!-- Mass import --> <!-- Mass import -->
<ImportStatusCard importStatus={importStatus} ontrigger={triggerImport} /> <div class="rounded-sm border border-line bg-surface p-6 shadow-sm">
<h2 class="mb-1 font-sans text-sm font-bold text-ink">{m.admin_system_import_heading()}</h2>
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_description()}</p>
{#if importStatus?.state === 'RUNNING'}
<p class="text-sm text-ink-2">{m.admin_system_import_status_running()}</p>
{:else if importStatus?.state === 'DONE'}
<p class="mb-4 rounded-sm border border-green-200 bg-green-50 p-3 text-sm text-green-700">
{m.admin_system_import_status_done({ count: importStatus.processed })}
</p>
<button
data-import-trigger
onclick={triggerImport}
class="rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
>
{m.admin_system_import_btn_retry()}
</button>
{:else if importStatus?.state === 'FAILED'}
<p class="mb-4 rounded-sm border border-red-200 bg-red-50 p-3 text-sm text-red-700">
{m.admin_system_import_status_failed({ message: importStatus.message })}
</p>
<button
data-import-trigger
onclick={triggerImport}
class="rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
>
{m.admin_system_import_btn_retry()}
</button>
{:else}
{#if importStatus !== null}
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_status_idle()}</p>
{/if}
<button
data-import-trigger
onclick={triggerImport}
class="rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
>
{m.admin_system_import_btn_start()}
</button>
{/if}
</div>
<!-- Thumbnail backfill --> <!-- Thumbnail backfill -->
<div class="rounded-sm border border-line bg-surface p-6 shadow-sm"> <div class="rounded-sm border border-line bg-surface p-6 shadow-sm">

View File

@@ -1,81 +0,0 @@
<script lang="ts">
import { m } from '$lib/paraglide/messages.js';
import type { ImportStatus } from './types.js';
let {
importStatus,
ontrigger
}: {
importStatus: ImportStatus | null;
ontrigger: () => void;
} = $props();
const failureMessage = $derived(
importStatus?.statusCode === 'IMPORT_FAILED_NO_SPREADSHEET'
? m.admin_system_import_failed_no_spreadsheet()
: m.admin_system_import_failed_internal()
);
</script>
<div class="rounded-sm border border-line bg-surface p-6 shadow-sm">
<h2 class="mb-5 font-sans text-xs font-bold tracking-widest text-ink-3 uppercase">
{m.admin_system_import_heading()}
</h2>
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_description()}</p>
{#if importStatus?.state === 'RUNNING'}
<div class="mb-4 flex items-center gap-3">
<span
data-testid="spinner"
role="status"
aria-label={m.admin_system_import_status_running()}
class="inline-block h-5 w-5 animate-spin rounded-full border-2 border-ink-3 border-t-brand-mint motion-reduce:animate-none"
></span>
<div>
<p data-testid="processed-count" class="text-base font-bold text-ink">
{importStatus.processed}
</p>
<p class="font-sans text-xs font-bold tracking-widest text-ink-3 uppercase">
{m.admin_system_import_status_running()}
</p>
</div>
</div>
{:else if importStatus?.state === 'DONE'}
<div class="mb-4 rounded-sm border border-green-200 bg-green-50 p-4 text-green-700">
<p data-testid="processed-count" class="text-base font-bold">{importStatus.processed}</p>
<p class="font-sans text-xs font-bold tracking-widest text-green-800 uppercase">
{m.admin_system_import_status_done_label()}
</p>
<p class="mt-1 text-xs text-green-800">{m.admin_system_import_status_done()}</p>
</div>
<button
data-import-trigger
onclick={ontrigger}
class="min-h-[44px] rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
>
{m.admin_system_import_btn_retry()}
</button>
{:else if importStatus?.state === 'FAILED'}
<p class="mb-4 rounded-sm border border-red-200 bg-red-50 p-3 text-sm text-red-700">
{failureMessage}
</p>
<button
data-import-trigger
onclick={ontrigger}
class="min-h-[44px] rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
>
{m.admin_system_import_btn_retry()}
</button>
{:else}
{#if importStatus !== null}
<p class="mb-4 text-sm text-ink-2">{m.admin_system_import_status_idle()}</p>
{/if}
<button
data-import-trigger
onclick={ontrigger}
class="min-h-[44px] rounded-sm bg-primary px-5 py-2 font-sans text-xs font-bold tracking-widest text-primary-fg uppercase transition-opacity hover:opacity-80"
>
{m.admin_system_import_btn_start()}
</button>
{/if}
</div>

View File

@@ -1,131 +0,0 @@
import { describe, expect, it, vi } from 'vitest';
import { render } from 'vitest-browser-svelte';
import { m } from '$lib/paraglide/messages.js';
import ImportStatusCard from './ImportStatusCard.svelte';
import type { ImportStatus } from './types.js';
const makeStatus = (overrides: Partial<ImportStatus> = {}): ImportStatus => ({
state: 'IDLE',
statusCode: 'IMPORT_IDLE',
processed: 0,
startedAt: null,
...overrides
});
describe('ImportStatusCard', () => {
it('shows spinner while state is RUNNING', async () => {
const { getByTestId } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'RUNNING', statusCode: 'IMPORT_RUNNING', processed: 3 }),
ontrigger: () => {}
}
});
await expect.element(getByTestId('spinner')).toBeInTheDocument();
});
it('shows processed count at text-base while RUNNING', async () => {
const { getByTestId } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'RUNNING', statusCode: 'IMPORT_RUNNING', processed: 7 }),
ontrigger: () => {}
}
});
await expect.element(getByTestId('processed-count')).toHaveTextContent('7');
});
it('shows processed count while DONE', async () => {
const { getByText } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'DONE', statusCode: 'IMPORT_DONE', processed: 42 }),
ontrigger: () => {}
}
});
await expect.element(getByText('42')).toBeVisible();
});
it('shows no-spreadsheet message when statusCode is IMPORT_FAILED_NO_SPREADSHEET', async () => {
const { getByText } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({
state: 'FAILED',
statusCode: 'IMPORT_FAILED_NO_SPREADSHEET'
}),
ontrigger: () => {}
}
});
await expect.element(getByText(m.admin_system_import_failed_no_spreadsheet())).toBeVisible();
});
it('shows internal error message when statusCode is IMPORT_FAILED_INTERNAL', async () => {
const { getByText } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'FAILED', statusCode: 'IMPORT_FAILED_INTERNAL' }),
ontrigger: () => {}
}
});
await expect.element(getByText(m.admin_system_import_failed_internal())).toBeVisible();
});
it('shows idle text when importStatus is non-null and state is IDLE', async () => {
const { getByText } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'IDLE', statusCode: 'IMPORT_IDLE' }),
ontrigger: () => {}
}
});
await expect.element(getByText(m.admin_system_import_status_idle())).toBeVisible();
});
it('shows no spinner when importStatus is null', async () => {
const { getByTestId } = render(ImportStatusCard, {
props: { importStatus: null, ontrigger: () => {} }
});
await expect.element(getByTestId('spinner')).not.toBeInTheDocument();
});
it('calls ontrigger when retry button is clicked in DONE state', async () => {
const ontrigger = vi.fn();
const { getByRole } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'DONE', statusCode: 'IMPORT_DONE', processed: 5 }),
ontrigger
}
});
await getByRole('button').click();
expect(ontrigger).toHaveBeenCalledOnce();
});
it('calls ontrigger when retry button is clicked in FAILED state', async () => {
const ontrigger = vi.fn();
const { getByRole } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'FAILED', statusCode: 'IMPORT_FAILED_INTERNAL' }),
ontrigger
}
});
await getByRole('button').click();
expect(ontrigger).toHaveBeenCalledOnce();
});
it('calls ontrigger when start button is clicked in IDLE state', async () => {
const ontrigger = vi.fn();
const { getByRole } = render(ImportStatusCard, {
props: {
importStatus: makeStatus({ state: 'IDLE', statusCode: 'IMPORT_IDLE' }),
ontrigger
}
});
await getByRole('button').click();
expect(ontrigger).toHaveBeenCalledOnce();
});
});

View File

@@ -163,7 +163,7 @@ describe('Admin system page — mass import card', () => {
ok: true, ok: true,
json: async () => ({ json: async () => ({
state: 'FAILED', state: 'FAILED',
statusCode: 'IMPORT_FAILED_NO_SPREADSHEET', message: 'Datei nicht gefunden.',
processed: 0, processed: 0,
startedAt: '2026-01-01T10:00:00' startedAt: '2026-01-01T10:00:00'
}) })
@@ -182,7 +182,7 @@ describe('Admin system page — mass import card', () => {
}) })
); );
render(Page, {}); render(Page, {});
await expect.element(page.getByText(/Keine Tabellendatei gefunden/i)).toBeInTheDocument(); await expect.element(page.getByText(/Datei nicht gefunden/i)).toBeInTheDocument();
await expect.element(page.getByRole('button', { name: /Erneut starten/i })).toBeInTheDocument(); await expect.element(page.getByRole('button', { name: /Erneut starten/i })).toBeInTheDocument();
}); });
}); });

View File

@@ -246,7 +246,7 @@ describe('admin/system page', () => {
return new Response( return new Response(
JSON.stringify({ JSON.stringify({
state: 'FAILED', state: 'FAILED',
statusCode: 'IMPORT_FAILED_INTERNAL', message: 'database error',
processed: 0, processed: 0,
startedAt: null startedAt: null
}), }),
@@ -262,7 +262,7 @@ describe('admin/system page', () => {
render(AdminSystemPage, { props: {} }); render(AdminSystemPage, { props: {} });
await vi.waitFor(() => { await vi.waitFor(() => {
expect(document.body.textContent).toContain('Interner Fehler beim Import'); expect(document.body.textContent).toContain('database error');
}); });
}); });

View File

@@ -1,6 +0,0 @@
/**
 * Client-side snapshot of the mass-import job as polled from the backend.
 */
export type ImportStatus = {
	// Lifecycle phase of the import job.
	state: 'IDLE' | 'RUNNING' | 'DONE' | 'FAILED';
	// Machine-readable code (e.g. 'IMPORT_DONE', 'IMPORT_FAILED_INTERNAL');
	// the UI maps it to a localized message.
	statusCode: string;
	// Count of items processed so far; shown e.g. in the DONE state.
	processed: number;
	// Start timestamp string (e.g. '2026-01-01T10:00:00'), or null when the
	// job has not started. NOTE(review): no timezone suffix observed — confirm
	// whether the backend sends local or UTC time.
	startedAt: string | null;
};

View File

@@ -1,15 +1,24 @@
<script lang="ts"> <script lang="ts">
import { enhance } from '$app/forms'; import { enhance } from '$app/forms';
import { beforeNavigate, goto } from '$app/navigation';
import { m } from '$lib/paraglide/messages.js'; import { m } from '$lib/paraglide/messages.js';
import UserProfileSection from '$lib/user/UserProfileSection.svelte'; import UserProfileSection from '$lib/user/UserProfileSection.svelte';
import UserGroupsSection from '$lib/user/UserGroupsSection.svelte'; import UserGroupsSection from '$lib/user/UserGroupsSection.svelte';
import AccountSection from './AccountSection.svelte'; import AccountSection from './AccountSection.svelte';
import { createUnsavedWarning } from '$lib/shared/hooks/useUnsavedWarning.svelte';
import UnsavedWarningBanner from '$lib/shared/primitives/UnsavedWarningBanner.svelte';
let { data, form } = $props(); let { data, form } = $props();
const unsaved = createUnsavedWarning(); let isDirty = $state(false);
let showUnsavedWarning = $state(false);
let discardTarget: string | null = $state(null);
beforeNavigate(({ cancel, to }) => {
if (isDirty) {
cancel();
showUnsavedWarning = true;
discardTarget = to?.url.href ?? null;
}
});
</script> </script>
<div class="flex flex-1 flex-col overflow-hidden"> <div class="flex flex-1 flex-col overflow-hidden">
@@ -35,8 +44,23 @@ const unsaved = createUnsavedWarning();
<!-- Scrollable body --> <!-- Scrollable body -->
<div class="flex-1 overflow-y-auto px-5 py-5"> <div class="flex-1 overflow-y-auto px-5 py-5">
{#if unsaved.showUnsavedWarning} {#if showUnsavedWarning}
<UnsavedWarningBanner onDiscard={unsaved.discard} /> <div
class="mb-5 flex items-center justify-between rounded border border-amber-200 bg-amber-50 p-3 text-sm text-amber-800 dark:border-amber-800 dark:bg-amber-950/40 dark:text-amber-300"
>
<span>{m.admin_unsaved_warning()}</span>
<button
type="button"
onclick={() => {
isDirty = false;
showUnsavedWarning = false;
if (discardTarget) goto(discardTarget);
}}
class="ml-4 shrink-0 font-sans text-xs font-bold tracking-widest text-amber-800 uppercase hover:text-amber-900 dark:text-amber-300"
>
{m.person_discard_changes()}
</button>
</div>
{/if} {/if}
{#if form?.error} {#if form?.error}
<div class="mb-5 rounded border border-red-200 bg-red-50 p-3 text-sm text-red-700"> <div class="mb-5 rounded border border-red-200 bg-red-50 p-3 text-sm text-red-700">
@@ -47,11 +71,11 @@ const unsaved = createUnsavedWarning();
<form <form
id="new-user-form" id="new-user-form"
method="POST" method="POST"
use:enhance={() => async ({ result, update }) => { use:enhance
if (result.type === 'redirect') unsaved.clearOnSuccess(); oninput={() => {
await update(); isDirty = true;
showUnsavedWarning = false;
}} }}
oninput={unsaved.markDirty}
class="space-y-5" class="space-y-5"
> >
<div class="rounded-sm border border-line bg-surface p-5 shadow-sm"> <div class="rounded-sm border border-line bg-surface p-5 shadow-sm">

View File

@@ -1,19 +1,9 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { afterEach, describe, expect, it, vi } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte'; import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser'; import { page } from 'vitest/browser';
import Page from './+page.svelte'; import Page from './+page.svelte';
const enhanceCaptureRef = vi.hoisted(() => ({ submitFn: undefined as unknown })); vi.mock('$app/forms', () => ({ enhance: () => () => {} }));
vi.mock('$app/forms', () => ({
enhance: (_el: HTMLFormElement, fn?: unknown) => {
enhanceCaptureRef.submitFn = fn;
return { destroy: vi.fn() };
}
}));
vi.mock('$app/navigation', () => ({ beforeNavigate: vi.fn(), goto: vi.fn() }));
import { beforeNavigate, goto } from '$app/navigation';
const groups = [ const groups = [
{ id: 'g1', name: 'Editoren', permissions: ['WRITE_ALL'] }, { id: 'g1', name: 'Editoren', permissions: ['WRITE_ALL'] },
@@ -30,13 +20,6 @@ const baseData = {
afterEach(cleanup); afterEach(cleanup);
// Shape of the callback captured from the mocked `use:enhance` action:
// invoking it yields the per-submission handler, which receives the form
// action `result` and SvelteKit's `update` function.
type SubmitFn = () => Promise<
	(opts: {
		result: { type: string; [key: string]: unknown };
		update: () => Promise<void>;
	}) => Promise<void>
>;
// ─── Rendering ──────────────────────────────────────────────────────────────── // ─── Rendering ────────────────────────────────────────────────────────────────
describe('Admin new user page rendering', () => { describe('Admin new user page rendering', () => {
@@ -83,103 +66,3 @@ describe('Admin new user page error display', () => {
await expect.element(page.getByText('Ein Fehler ist aufgetreten.')).not.toBeInTheDocument(); await expect.element(page.getByText('Ein Fehler ist aufgetreten.')).not.toBeInTheDocument();
}); });
}); });
// ─── Unsaved-changes guard ────────────────────────────────────────────────────
// Drives the guard through the mocked `beforeNavigate` callback (captured via
// vi.mocked(...).mock.calls) and the `use:enhance` submit callback captured in
// enhanceCaptureRef by the $app/forms mock.

describe('Admin new user page unsaved-changes guard', () => {
	beforeEach(() => {
		// Reset mock call history and drop any submit callback captured by a
		// previous test so each test inspects only its own render.
		vi.clearAllMocks();
		enhanceCaptureRef.submitFn = undefined;
	});

	// Pristine form: no warning banner on first render.
	it('does not show unsaved warning initially', async () => {
		render(Page, { data: baseData, form: null });
		await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
	});

	// Dirty form: navigation is cancelled and the banner appears.
	it('cancels navigation and shows banner when form is dirty', async () => {
		render(Page, { data: baseData, form: null });
		// Grab the guard callback the page registered with beforeNavigate.
		const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
		// Mark the form dirty by firing a bubbling input event on the email field.
		document
			.querySelector<HTMLInputElement>('input[name="email"]')!
			.dispatchEvent(new InputEvent('input', { bubbles: true }));
		const cancel = vi.fn();
		// Simulate an attempted navigation away from the page.
		callback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
		expect(cancel).toHaveBeenCalled();
		await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
	});

	// Clean form: navigation is allowed through.
	it('does not cancel navigation when form is clean', async () => {
		render(Page, { data: baseData, form: null });
		const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
		const cancel = vi.fn();
		callback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
		expect(cancel).not.toHaveBeenCalled();
	});

	// Discard resumes the navigation that was originally blocked.
	it('discard button calls goto with the target URL', async () => {
		render(Page, { data: baseData, form: null });
		const [callback] = vi.mocked(beforeNavigate).mock.calls[0];
		document
			.querySelector<HTMLInputElement>('input[name="email"]')!
			.dispatchEvent(new InputEvent('input', { bubbles: true }));
		// Block a navigation first so the banner (and its discard button) shows.
		callback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/users') } });
		await page.getByRole('button', { name: /verwerfen/i }).click();
		expect(vi.mocked(goto)).toHaveBeenCalledWith('http://localhost/admin/users');
	});

	// Successful submit (redirect result) clears dirtiness: banner disappears
	// and subsequent navigation is no longer cancelled.
	it('clears banner when enhance callback receives a redirect result', async () => {
		render(Page, { data: baseData, form: null });
		const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
		document
			.querySelector<HTMLInputElement>('input[name="email"]')!
			.dispatchEvent(new InputEvent('input', { bubbles: true }));
		navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/users') } });
		await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
		// Invoke the captured use:enhance callback as SvelteKit would on submit.
		const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
		await innerFn({
			result: { type: 'redirect', location: '/admin/users', status: 303 },
			update: vi.fn().mockResolvedValue(undefined)
		});
		await expect.element(page.getByText(/ungespeicherte Änderungen/i)).not.toBeInTheDocument();
		const cancel = vi.fn();
		navCallback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
		expect(cancel).not.toHaveBeenCalled();
	});

	// Failed submit keeps the form dirty: later navigation is still cancelled.
	it('keeps banner when enhance callback receives a failure result', async () => {
		render(Page, { data: baseData, form: null });
		const [navCallback] = vi.mocked(beforeNavigate).mock.calls[0];
		document
			.querySelector<HTMLInputElement>('input[name="email"]')!
			.dispatchEvent(new InputEvent('input', { bubbles: true }));
		navCallback({ cancel: vi.fn(), to: { url: new URL('http://localhost/admin/users') } });
		await expect.element(page.getByText(/ungespeicherte Änderungen/i)).toBeInTheDocument();
		const innerFn = await (enhanceCaptureRef.submitFn as SubmitFn)();
		await innerFn({
			result: { type: 'failure', status: 400, data: { error: 'E-Mail bereits vergeben' } },
			update: vi.fn().mockResolvedValue(undefined)
		});
		const cancel = vi.fn();
		navCallback({ cancel, to: { url: new URL('http://localhost/admin/users') } });
		expect(cancel).toHaveBeenCalled();
	});
});

View File

@@ -1,6 +1,6 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { cleanup, render } from 'vitest-browser-svelte'; import { cleanup, render } from 'vitest-browser-svelte';
import { page } from 'vitest/browser'; import { page, userEvent } from 'vitest/browser';
import { createRawSnippet } from 'svelte'; import { createRawSnippet } from 'svelte';
vi.mock('$env/static/public', () => ({ PUBLIC_NOTIFICATION_POLL_MS: '60000' })); vi.mock('$env/static/public', () => ({ PUBLIC_NOTIFICATION_POLL_MS: '60000' }));
@@ -96,13 +96,13 @@ describe('Layout user dropdown', () => {
it('opens dropdown on button click', async () => { it('opens dropdown on button click', async () => {
render(Layout, { data: makeData(), children: emptySnippet }); render(Layout, { data: makeData(), children: emptySnippet });
((await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement).click(); await page.getByRole('button', { name: /MM/ }).click();
await expect.element(page.getByRole('link', { name: /Profil/i })).toBeInTheDocument(); await expect.element(page.getByRole('link', { name: /Profil/i })).toBeInTheDocument();
}); });
it('profile link points to /profile', async () => { it('profile link points to /profile', async () => {
render(Layout, { data: makeData(), children: emptySnippet }); render(Layout, { data: makeData(), children: emptySnippet });
((await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement).click(); await page.getByRole('button', { name: /MM/ }).click();
await expect await expect
.element(page.getByRole('link', { name: /Profil/i })) .element(page.getByRole('link', { name: /Profil/i }))
.toHaveAttribute('href', '/profile'); .toHaveAttribute('href', '/profile');
@@ -110,16 +110,16 @@ describe('Layout user dropdown', () => {
it('logout button is in the dropdown', async () => { it('logout button is in the dropdown', async () => {
render(Layout, { data: makeData(), children: emptySnippet }); render(Layout, { data: makeData(), children: emptySnippet });
((await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement).click(); await page.getByRole('button', { name: /MM/ }).click();
await expect.element(page.getByRole('button', { name: /Abmelden/i })).toBeInTheDocument(); await expect.element(page.getByRole('button', { name: /Abmelden/i })).toBeInTheDocument();
}); });
it('closes dropdown when Escape is pressed', async () => { it('closes dropdown when Escape is pressed', async () => {
render(Layout, { data: makeData(), children: emptySnippet }); render(Layout, { data: makeData(), children: emptySnippet });
const btnEl = (await page.getByRole('button', { name: /MM/ }).element()) as HTMLElement; const btn = page.getByRole('button', { name: /MM/ });
btnEl.click(); await btn.click();
await expect.element(page.getByRole('link', { name: /Profil/i })).toBeInTheDocument(); await expect.element(page.getByRole('link', { name: /Profil/i })).toBeInTheDocument();
btnEl.dispatchEvent(new KeyboardEvent('keydown', { key: 'Escape', bubbles: true })); await userEvent.keyboard('{Escape}');
await tick(); await tick();
await expect.element(page.getByRole('link', { name: /Profil/i })).not.toBeInTheDocument(); await expect.element(page.getByRole('link', { name: /Profil/i })).not.toBeInTheDocument();
}); });

View File

@@ -1,4 +1,3 @@
import { sentrySvelteKit } from '@sentry/sveltekit';
import { paraglideVitePlugin } from '@inlang/paraglide-js'; import { paraglideVitePlugin } from '@inlang/paraglide-js';
import devtoolsJson from 'vite-plugin-devtools-json'; import devtoolsJson from 'vite-plugin-devtools-json';
import tailwindcss from '@tailwindcss/vite'; import tailwindcss from '@tailwindcss/vite';
@@ -34,21 +33,6 @@ export default defineConfig({
} }
}, },
plugins: [ plugins: [
sentrySvelteKit({
org: 'familienarchiv',
project: 'frontend',
authToken: process.env.SENTRY_AUTH_TOKEN,
sentryUrl: (() => {
const dsn = process.env.VITE_SENTRY_DSN;
if (!dsn) return undefined;
try {
return new URL(dsn).origin;
} catch {
return undefined;
}
})(),
autoUploadSourceMaps: !!process.env.SENTRY_AUTH_TOKEN
}),
tailwindcss(), tailwindcss(),
sveltekit(), sveltekit(),
devtoolsJson(), devtoolsJson(),

View File

@@ -24,8 +24,6 @@ export default defineConfig({
}) })
], ],
test: { test: {
testTimeout: 30_000,
hookTimeout: 15_000,
expect: { requireAssertions: true }, expect: { requireAssertions: true },
browser: { browser: {
enabled: true, enabled: true,

View File

@@ -88,13 +88,3 @@ git.raddatz.cloud {
import security_headers import security_headers
reverse_proxy 127.0.0.1:3005 reverse_proxy 127.0.0.1:3005
} }
# Grafana UI — proxied to the loopback-bound host port (PORT_GRAFANA=3003).
grafana.archiv.raddatz.cloud {
	import security_headers
	reverse_proxy 127.0.0.1:3003
}

# GlitchTip error tracker — proxied to the loopback-bound host port
# (PORT_GLITCHTIP=3002). Must match GLITCHTIP_DOMAIN in the obs env file.
glitchtip.archiv.raddatz.cloud {
	import security_headers
	reverse_proxy 127.0.0.1:3002
}

View File

@@ -1,10 +0,0 @@
# Grafana dashboard provisioning: load every dashboard JSON found under the
# configured path and keep it in sync with the files on disk.
apiVersion: 1
providers:
  - name: default
    type: file                # read dashboard definitions from the filesystem
    disableDeletion: true     # file-provisioned dashboards cannot be deleted in the UI
    updateIntervalSeconds: 30 # re-scan the path for changed files every 30s
    options:
      path: /etc/grafana/provisioning/dashboards
      foldersFromFilesStructure: false # directory layout is not mapped to Grafana folders

View File

@@ -1,284 +0,0 @@
{
"__inputs": [
{
"name": "DS_LOKI",
"label": "Loki",
"description": "",
"type": "datasource",
"pluginId": "loki",
"pluginName": "Loki"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.1.0"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "panel",
"id": "logs",
"name": "Logs",
"version": ""
},
{
"type": "datasource",
"id": "loki",
"name": "Loki",
"version": "1.0.0"
}
],
"annotations": {
"list": [
{
"$$hashKey": "object:75",
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "Log Viewer Dashboard for Loki",
"editable": false,
"gnetId": 13639,
"graphTooltip": 0,
"id": null,
"iteration": 1608932746420,
"links": [
{
"$$hashKey": "object:59",
"icon": "bolt",
"includeVars": true,
"keepTime": true,
"tags": [],
"targetBlank": true,
"title": "View In Explore",
"type": "link",
"url": "/explore?orgId=1&left=[\"now-1h\",\"now\",\"Loki\",{\"expr\":\"{job=\\\"$app\\\"}\"},{\"ui\":[true,true,true,\"none\"]}]"
},
{
"$$hashKey": "object:61",
"icon": "external link",
"tags": [],
"targetBlank": true,
"title": "Learn LogQL",
"type": "link",
"url": "https://grafana.com/docs/loki/latest/logql/"
}
],
"panels": [
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": {"type": "loki", "uid": "loki"},
"fieldConfig": {
"defaults": {
"custom": {},
"links": []
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": false,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pluginVersion": "7.1.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(count_over_time({job=\"$app\"} |= \"$search\" [$__interval]))",
"legendFormat": "",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:168",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
},
{
"$$hashKey": "object:169",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"datasource": {"type": "loki", "uid": "loki"},
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"gridPos": {
"h": 25,
"w": 24,
"x": 0,
"y": 3
},
"id": 2,
"maxDataPoints": "",
"options": {
"showLabels": false,
"showTime": true,
"sortOrder": "Descending",
"wrapLogMessage": false
},
"targets": [
{
"expr": "{job=\"$app\"} |= \"$search\" | logfmt",
"hide": false,
"legendFormat": "",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "",
"transparent": true,
"type": "logs"
}
],
"refresh": false,
"schemaVersion": 26,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": null,
"current": {},
"datasource": {"type": "loki", "uid": "loki"},
"definition": "label_values(job)",
"hide": 0,
"includeAll": false,
"label": "App",
"multi": false,
"name": "app",
"options": [],
"query": "label_values(job)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"selected": false,
"text": "",
"value": ""
},
"hide": 0,
"label": "String Match",
"name": "search",
"options": [
{
"selected": true,
"text": "",
"value": ""
}
],
"query": "",
"skipUrlSync": false,
"type": "textbox"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"hidden": false,
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Logs / App",
"uid": "sadlil-loki-apps-dashboard",
"version": 13
}

View File

@@ -1,38 +0,0 @@
# Grafana datasource provisioning. The uids are fixed ('prometheus', 'loki',
# 'tempo') because the datasources cross-reference each other below; all three
# are marked non-editable so the UI cannot drift from this file.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    uid: prometheus
    url: http://obs-prometheus:9090
    isDefault: true
    editable: false
  - name: Loki
    type: loki
    uid: loki
    url: http://obs-loki:3100
    editable: false
    jsonData:
      derivedFields:
        # Extract the trace id from JSON log lines and link it to Tempo.
        - name: TraceID
          matcherRegex: '"traceId":"(\w+)"'
          url: "${__value.raw}" # the matched trace id itself is the query
          datasourceUid: tempo
  - name: Tempo
    type: tempo
    uid: tempo
    url: http://obs-tempo:3200
    editable: false
    jsonData:
      # Trace → logs: query Loki for log lines within ±1m of the span.
      tracesToLogsV2:
        datasourceUid: loki
        spanStartTimeShift: "-1m"
        spanEndTimeShift: "1m"
        filterByTraceID: true
        filterBySpanID: false
      # Service map panels are driven by Tempo's span metrics in Prometheus.
      serviceMap:
        datasourceUid: prometheus
      nodeGraph:
        enabled: true

View File

@@ -1,40 +0,0 @@
# Loki single-node configuration: filesystem storage, TSDB index,
# 30-day retention enforced by the compactor.
auth_enabled: false # safe — loki is not exposed beyond obs-net. Add auth before binding port 3100 to host.

server:
  http_listen_port: 3100

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1 # single instance — no replication possible or needed
  ring:
    kvstore:
      store: inmemory # correct for single-node — no etcd/consul needed here

schema_config:
  configs:
    - from: 2024-01-01
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

limits_config:
  retention_period: 720h # 30 days — low-volume family archive; revisit if log volume grows

compactor:
  working_directory: /loki/compactor
  compaction_interval: 10m
  retention_enabled: true # the compactor actually deletes expired chunks
  retention_delete_delay: 2h
  retention_delete_worker_count: 150
  delete_request_store: filesystem

analytics:
  reporting_enabled: false # no telemetry sent to Grafana Labs

View File

@@ -1,24 +0,0 @@
# Non-secret observability stack configuration — tracked in git.
# Secret values (passwords, keys) are injected by CI from Gitea secrets
# into /opt/familienarchiv/obs-secrets.env at deploy time.
#
# For local dev the main .env file supplies these values instead;
# this file is only used in the CI/production path.

# Host ports (all bound to 127.0.0.1 — Caddy is the external entry point)
PORT_GRAFANA=3003
PORT_GLITCHTIP=3002
PORT_PROMETHEUS=9090

# Public URLs — used for internal redirects, alert email links, OAuth callbacks.
# Each must match its corresponding Caddy vhost.
GF_SERVER_ROOT_URL=https://grafana.archiv.raddatz.cloud
GLITCHTIP_DOMAIN=https://glitchtip.archiv.raddatz.cloud

# Database user shared with the main stack's PostgreSQL instance.
POSTGRES_USER=archiv

# PostgreSQL hostname for GlitchTip db-init and workers.
# The actual value depends on the Compose project name — it is not a fixed string.
# CI sets POSTGRES_HOST in obs-secrets.env per environment:
#   staging:    archiv-staging-db-1    (project archiv-staging + service db)
#   production: archiv-production-db-1 (project archiv-production + service db)
# For local dev, set POSTGRES_HOST in your .env file (defaults to archive-db there).

View File

@@ -1,28 +0,0 @@
# Prometheus scrape configuration for the observability stack.
# All targets are resolved via Docker DNS on the shared networks.
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  # Host metrics (CPU, memory, disk, network).
  - job_name: node
    static_configs:
      - targets: ['node-exporter:9100']
  # Per-container resource metrics.
  - job_name: cadvisor
    static_configs:
      - targets: ['cadvisor:8080']
  # Spring Boot backend via the Actuator Prometheus endpoint.
  - job_name: spring-boot
    metrics_path: /actuator/prometheus
    static_configs:
      # Uses the Docker service name (not container_name) for reliable DNS resolution.
      # Target will show as DOWN until backend instrumentation issue adds
      # micrometer-registry-prometheus and exposes the endpoint — this is expected.
      - targets: ['backend:8081']
  - job_name: ocr-service
    metrics_path: /metrics
    static_configs:
      # TODO: remove or add prometheus-client to ocr-service.
      # The Python OCR service does not currently expose Prometheus metrics.
      # This target will show as DOWN until prometheus-client is added to ocr-service.
      - targets: ['ocr:8000']

View File

@@ -1,30 +0,0 @@
# Promtail: discover all Docker containers via the daemon socket and
# ship their stdout/stderr logs to Loki.
server:
  http_listen_port: 9080
  grpc_listen_port: 0 # gRPC disabled — used for Promtail clustering only; single-node deployment

positions:
  filename: /tmp/positions.yaml # /tmp is a named volume (promtail_positions) — persists across restarts

clients:
  - url: http://loki:3100/loki/api/v1/push
    # Loki HTTP API is unauthenticated internally. Any container on obs-net can push logs.
    # Acceptable: only trusted application containers join this network.

scrape_configs:
  - job_name: docker-containers
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 5s
    relabel_configs:
      # Strip the leading '/' Docker puts in front of container names.
      - source_labels: ['__meta_docker_container_name']
        regex: '/(.*)'
        target_label: 'container_name'
      # Note: container_name differs between dev (archive-backend) and prod
      # (archiv-production-backend-1). Prefer compose_service for stable LogQL
      # queries across environments — it is stable: backend, db, minio, etc.
      - source_labels: ['__meta_docker_container_label_com_docker_compose_service']
        target_label: 'compose_service'
      - source_labels: ['__meta_docker_container_label_com_docker_compose_project']
        target_label: 'compose_project'
      # stdout vs stderr of the container.
      - source_labels: ['__meta_docker_container_log_stream']
        target_label: 'logstream'

View File

@@ -1,48 +0,0 @@
# Tempo single-node configuration: OTLP ingest, local filesystem storage,
# 30-day retention, and metrics generation for service graphs.
server:
  http_listen_port: 3200

distributor:
  receivers:
    # Standard OTLP ingest on both transports.
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318

ingester:
  max_block_duration: 5m # flush blocks at least every 5 minutes

compactor:
  compaction:
    # 30 days — matches Loki retention. Compactor enforces this automatically;
    # no manual intervention needed under normal trace volumes.
    block_retention: 720h

storage:
  trace:
    # Local filesystem storage — single-VPS deployment, no S3 backend needed.
    # Both paths are on the same named Docker volume (tempo_data) so they
    # survive container restarts without split-brain between WAL and blocks.
    backend: local
    local:
      path: /var/tempo/blocks
    wal:
      path: /var/tempo/wal

metrics_generator:
  registry:
    external_labels:
      source: tempo
  storage:
    path: /var/tempo/generator/wal

# Tempo HTTP API (port 3200) is unauthenticated. Access is controlled entirely
# by network isolation: only Grafana (on obs-net) should reach this port.
# The OTLP receivers (4317 gRPC, 4318 HTTP) are internal to archiv-net only.
overrides:
  defaults:
    metrics_generator:
      # Enable the processors that feed Grafana's service map and span metrics.
      processors:
        - service-graphs
        - span-metrics

View File

@@ -1,26 +1,16 @@
# runner-config.yaml — only the relevant section # runner-config.yaml — only the relevant section
container: container:
# join the same network Gitea is on, so job containers can resolve 'gitea' # passed as DOCKER_HOST inside the job container
# for actions/checkout and other internal API calls.
network: gitea_gitea
# passed as DOCKER_HOST inside the job container; act_runner auto-mounts
# this socket path into the job, so no explicit -v option is needed.
docker_host: "unix:///var/run/docker.sock" docker_host: "unix:///var/run/docker.sock"
# Job workspaces are stored here and mounted at the same absolute path # whitelists the socket path so workflows can mount it
# inside job containers. Identical host <-> container path is the requirement:
# Compose resolves relative bind mounts to $(pwd) inside the job container
# and passes that absolute path to the host daemon, which must find the file
# at that exact host path. Prerequisite: /srv/gitea-workspace exists on the
# host and is bind-mounted in the runner container (see compose.yaml).
workdir_parent: /srv/gitea-workspace
# whitelists volumes that workflow steps may bind-mount
valid_volumes: valid_volumes:
- "/var/run/docker.sock" - "/var/run/docker.sock"
- "/srv/gitea-workspace" # appended to `docker run` when the runner spawns a job container
- "/opt/familienarchiv" # SECURITY: Mounting the Docker socket grants job containers root-equivalent
# mount the workspace and the permanent obs/config directory into job containers. # access to the host Docker daemon. Acceptable here because only trusted code
# /opt/familienarchiv is the stable path CI copies configs to (ADR-016); it must # from this private repo runs on this runner. Do NOT use on a runner that
# be mounted here so deploy steps can write through to the host filesystem. # accepts untrusted PRs from external contributors.
options: "-v /srv/gitea-workspace:/srv/gitea-workspace -v /opt/familienarchiv:/opt/familienarchiv" options: "-v /var/run/docker.sock:/var/run/docker.sock"
# keep behavior default — Testcontainers handles its own networking # keep network mode default (bridge) — Testcontainers handles its own networking
force_pull: false force_pull: false