Compare commits


1 Commit

Author SHA1 Message Date
Marcel
b1ff624c98 fix(infra): pin GlitchTip image to 6.1.6 (v4 tag never existed)
glitchtip/glitchtip:v4 is not a real tag — GlitchTip does not use a
v-prefix in its Docker image versioning. Latest stable release is 6.1.6.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-05-15 17:53:13 +02:00
24 changed files with 65 additions and 874 deletions

View File

@@ -29,17 +29,16 @@ OCR_TRAINING_TOKEN=change-me-in-production
# --- Observability --- # --- Observability ---
# Optional stack — start with: docker compose -f docker-compose.observability.yml up -d # Optional stack — start with: docker compose -f docker-compose.observability.yml up -d
# Requires the main stack to already be running (docker compose up -d creates archiv-net). # Requires the main stack to already be running (docker compose up -d creates archiv-net).
# In production the stack is managed from /opt/familienarchiv/ (see docs/DEPLOYMENT.md §4).
# Ports for host access # Ports for host access
PORT_GRAFANA=3003 PORT_GRAFANA=3001
PORT_GLITCHTIP=3002 PORT_GLITCHTIP=3002
PORT_PROMETHEUS=9090 PORT_PROMETHEUS=9090
# Grafana admin password — change this before exposing Grafana beyond localhost # Grafana admin password — change this before exposing Grafana beyond localhost
GRAFANA_ADMIN_PASSWORD=changeme GRAFANA_ADMIN_PASSWORD=changeme
# GlitchTip domain — production: use https://glitchtip.archiv.raddatz.cloud (must match Caddy vhost) # GlitchTip domain — production: use https://grafana.raddatz.cloud (must match Caddy vhost)
GLITCHTIP_DOMAIN=http://localhost:3002 GLITCHTIP_DOMAIN=http://localhost:3002
# GlitchTip secret key — Django SECRET_KEY equivalent, used to sign sessions and tokens. # GlitchTip secret key — Django SECRET_KEY equivalent, used to sign sessions and tokens.
@@ -48,15 +47,6 @@ GLITCHTIP_DOMAIN=http://localhost:3002
# Generate with: python3 -c "import secrets; print(secrets.token_hex(50))" # Generate with: python3 -c "import secrets; print(secrets.token_hex(50))"
GLITCHTIP_SECRET_KEY=changeme-generate-a-real-secret GLITCHTIP_SECRET_KEY=changeme-generate-a-real-secret
# PostgreSQL hostname for GlitchTip's db-init job and workers.
# Override when only the staging stack is running (container name differs from archive-db).
# Default (archive-db) is correct for production with the full stack up.
POSTGRES_HOST=archive-db
# $$ escaping note: passwords in /opt/familienarchiv/.env that contain a literal '$' must
# use '$$' so Docker Compose does not expand them as variable references.
# Example: a password 'p@$$word' should be written as 'p@$$$$word' in the .env file.
# Error reporting DSNs — leave empty to disable the SDK (safe default). # Error reporting DSNs — leave empty to disable the SDK (safe default).
# SENTRY_DSN: backend (Spring Boot) — used by the GlitchTip/Sentry Java SDK # SENTRY_DSN: backend (Spring Boot) — used by the GlitchTip/Sentry Java SDK
SENTRY_DSN= SENTRY_DSN=

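The `$$` escaping note removed above can be made concrete with a throwaway check — a minimal sketch, assuming a compose file that substitutes `${POSTGRES_PASSWORD}` (the password and paths are hypothetical):

```bash
# Literal password: p@$$word (two dollar signs). Each '$' is doubled in the
# .env file so Compose does not expand it as a variable reference.
printf 'POSTGRES_PASSWORD=p@$$$$word\n' > /tmp/escaping-demo.env
# Inspect the resolved value; note that `docker compose config` may re-escape
# '$' as '$$' in its YAML output to keep that output re-parseable.
docker compose --env-file /tmp/escaping-demo.env config | grep -i password
```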
View File

@@ -78,6 +78,12 @@ jobs:
APP_MAIL_FROM=noreply@staging.raddatz.cloud APP_MAIL_FROM=noreply@staging.raddatz.cloud
IMPORT_HOST_DIR=/srv/familienarchiv-staging/import IMPORT_HOST_DIR=/srv/familienarchiv-staging/import
POSTGRES_USER=archiv POSTGRES_USER=archiv
PORT_GRAFANA=3003
PORT_GLITCHTIP=3002
PORT_PROMETHEUS=9090
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
GLITCHTIP_DOMAIN=https://glitchtip.archiv.raddatz.cloud
SENTRY_DSN=${{ secrets.SENTRY_DSN }} SENTRY_DSN=${{ secrets.SENTRY_DSN }}
EOF EOF
@@ -125,77 +131,13 @@ jobs:
--profile staging \ --profile staging \
up -d --wait --remove-orphans up -d --wait --remove-orphans
- name: Deploy observability configs
# Copies the compose file and config tree from the workspace checkout
# into /opt/familienarchiv/ — the permanent location that persists
# between CI runs. Containers started in the next step bind-mount
# from there, so a future workspace wipe cannot corrupt a running
# config file.
#
# obs-secrets.env is written fresh from Gitea secrets on every run so
# Gitea is always the single source of truth for secret rotation.
# Non-secret config lives in infra/observability/obs.env (tracked in git).
run: |
rm -rf /opt/familienarchiv/infra/observability
mkdir -p /opt/familienarchiv/infra/observability
cp -r infra/observability/. /opt/familienarchiv/infra/observability/
cp docker-compose.observability.yml /opt/familienarchiv/
cat > /opt/familienarchiv/obs-secrets.env <<'EOF'
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
POSTGRES_PASSWORD=${{ secrets.STAGING_POSTGRES_PASSWORD }}
POSTGRES_HOST=archiv-staging-db-1
EOF
# Note: POSTGRES_HOST is derived from the Compose project name (archiv-staging)
# and service name (db). A project rename requires updating this value.
chmod 600 /opt/familienarchiv/obs-secrets.env
- name: Validate observability compose config
# Dry-run: resolves all variable substitutions and reports any missing
# required keys before containers start. Catches undefined variables and
# YAML errors in config files updated by the previous step.
# --env-file order: obs.env first (git-tracked defaults), obs-secrets.env
# second (CI-written secrets). Later files win on duplicate keys, so
# obs-secrets.env overrides POSTGRES_HOST set in obs.env.
run: |
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
config --quiet
- name: Start observability stack - name: Start observability stack
# Runs with absolute paths so bind mounts resolve to stable host paths
# that survive workspace wipes between nightly runs (see ADR-016).
# Non-secret config from obs.env (git-tracked); secrets from obs-secrets.env
# (written fresh from Gitea secrets above). --env-file order: obs.env first,
# obs-secrets.env second — later file wins on duplicate keys.
run: | run: |
docker compose \ docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \ -f docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \ --env-file .env.staging \
--env-file /opt/familienarchiv/obs-secrets.env \
up -d --wait --remove-orphans up -d --wait --remove-orphans
- name: Assert observability stack health
# docker compose up --wait covers services WITH healthcheck directives only.
# obs-promtail, obs-cadvisor, obs-node-exporter, and obs-glitchtip-worker have
# no healthcheck — they are considered "started" as soon as the process runs.
# This step explicitly asserts the five healthchecked critical services are
# healthy before the smoke test proceeds.
run: |
set -e
unhealthy=""
for svc in obs-loki obs-prometheus obs-grafana obs-tempo obs-glitchtip; do
status=$(docker inspect "$svc" --format '{{.State.Health.Status}}' 2>/dev/null || echo "missing")
if [ "$status" != "healthy" ]; then
echo "::error::$svc is not healthy (status: $status)"
unhealthy="$unhealthy $svc"
fi
done
[ -z "$unhealthy" ] || exit 1
echo "All critical observability services are healthy"
- name: Reload Caddy - name: Reload Caddy
# Apply any committed Caddyfile changes before smoke-testing the # Apply any committed Caddyfile changes before smoke-testing the
# public surface. Without this step, a Caddyfile edit lands in the # public surface. Without this step, a Caddyfile edit lands in the

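The `--env-file` precedence the removed steps rely on (later file wins on duplicate keys) is documented Compose behavior and easy to verify in isolation — a sketch with throwaway files and hypothetical values:

```bash
# obs.env-style defaults vs. obs-secrets.env-style overrides
printf 'POSTGRES_HOST=archive-db\nPORT_GRAFANA=3003\n' > /tmp/obs.env
printf 'POSTGRES_HOST=archiv-staging-db-1\n'           > /tmp/obs-secrets.env
# The later --env-file wins: the rendered config resolves POSTGRES_HOST to
# archiv-staging-db-1, which is why obs-secrets.env can override obs.env.
docker compose \
  -f docker-compose.observability.yml \
  --env-file /tmp/obs.env \
  --env-file /tmp/obs-secrets.env \
  config | grep archiv-staging-db-1
```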
View File

@@ -76,6 +76,12 @@ jobs:
APP_MAIL_FROM=noreply@raddatz.cloud APP_MAIL_FROM=noreply@raddatz.cloud
IMPORT_HOST_DIR=/srv/familienarchiv-production/import IMPORT_HOST_DIR=/srv/familienarchiv-production/import
POSTGRES_USER=archiv POSTGRES_USER=archiv
PORT_GRAFANA=3003
PORT_GLITCHTIP=3002
PORT_PROMETHEUS=9090
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
GLITCHTIP_DOMAIN=https://glitchtip.archiv.raddatz.cloud
SENTRY_DSN=${{ secrets.SENTRY_DSN }} SENTRY_DSN=${{ secrets.SENTRY_DSN }}
EOF EOF
@@ -98,75 +104,13 @@ jobs:
--env-file .env.production \ --env-file .env.production \
up -d --wait --remove-orphans up -d --wait --remove-orphans
- name: Deploy observability configs
# Mirrors the nightly approach: copies obs compose file and config tree
# to /opt/familienarchiv/ (permanent path, survives workspace wipes — ADR-016),
# then writes obs-secrets.env fresh from Gitea secrets.
# Non-secret config lives in infra/observability/obs.env (tracked in git).
run: |
rm -rf /opt/familienarchiv/infra/observability
mkdir -p /opt/familienarchiv/infra/observability
cp -r infra/observability/. /opt/familienarchiv/infra/observability/
cp docker-compose.observability.yml /opt/familienarchiv/
cat > /opt/familienarchiv/obs-secrets.env <<'EOF'
GRAFANA_ADMIN_PASSWORD=${{ secrets.GRAFANA_ADMIN_PASSWORD }}
GLITCHTIP_SECRET_KEY=${{ secrets.GLITCHTIP_SECRET_KEY }}
POSTGRES_PASSWORD=${{ secrets.PROD_POSTGRES_PASSWORD }}
POSTGRES_HOST=archiv-production-db-1
EOF
# Note: POSTGRES_HOST is derived from the Compose project name (archiv-production)
# and service name (db). A project rename requires updating this value.
chmod 600 /opt/familienarchiv/obs-secrets.env
- name: Validate observability compose config
# Dry-run: resolves all variable substitutions and reports any missing
# required keys before containers start. Catches undefined variables and
# YAML errors in config files updated by the previous step.
# --env-file order: obs.env first (git-tracked defaults), obs-secrets.env
# second (CI-written secrets). Later files win on duplicate keys, so
# obs-secrets.env overrides POSTGRES_HOST set in obs.env.
# Keep in sync with the equivalent step in nightly.yml (#603).
run: |
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
config --quiet
- name: Start observability stack - name: Start observability stack
# Runs with absolute paths so bind mounts resolve to stable host paths
# that survive workspace wipes between runs (see ADR-016).
# Non-secret config from obs.env (git-tracked); secrets from obs-secrets.env
# (written fresh from Gitea secrets above). --env-file order: obs.env first,
# obs-secrets.env second — later file wins on duplicate keys.
# Keep in sync with the equivalent step in nightly.yml (#603).
run: | run: |
docker compose \ docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \ -f docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \ --env-file .env.production \
--env-file /opt/familienarchiv/obs-secrets.env \
up -d --wait --remove-orphans up -d --wait --remove-orphans
- name: Assert observability stack health
# docker compose up --wait covers services WITH healthcheck directives only.
# obs-promtail, obs-cadvisor, obs-node-exporter, and obs-glitchtip-worker have
# no healthcheck — they are considered "started" as soon as the process runs.
# This step explicitly asserts the five healthchecked critical services are
# healthy before the smoke test proceeds.
# Keep in sync with the equivalent step in nightly.yml (#603).
run: |
set -e
unhealthy=""
for svc in obs-loki obs-prometheus obs-grafana obs-tempo obs-glitchtip; do
status=$(docker inspect "$svc" --format '{{.State.Health.Status}}' 2>/dev/null || echo "missing")
if [ "$status" != "healthy" ]; then
echo "::error::$svc is not healthy (status: $status)"
unhealthy="$unhealthy $svc"
fi
done
[ -z "$unhealthy" ] || exit 1
echo "All critical observability services are healthy"
- name: Reload Caddy - name: Reload Caddy
# See nightly.yml — same rationale and mechanism: DooD job containers # See nightly.yml — same rationale and mechanism: DooD job containers
# cannot call systemctl directly; nsenter via a privileged sibling # cannot call systemctl directly; nsenter via a privileged sibling

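As a quick manual complement to the removed assertion step (run on the server once the stack is up):

```bash
# Docker appends "(healthy)" / "(unhealthy)" to the status of containers that
# define a healthcheck; services without one only ever show plain uptime —
# exactly the gap the removed step closed for the five critical services.
docker ps --filter 'name=obs-' --format 'table {{.Names}}\t{{.Status}}'
```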
View File

@@ -274,35 +274,6 @@ Back button pattern — use the shared `<BackButton>` component from `$lib/share
→ See [docs/DEPLOYMENT.md](./docs/DEPLOYMENT.md) → See [docs/DEPLOYMENT.md](./docs/DEPLOYMENT.md)
### Observability stack (separate compose file)
Run via `docker-compose.observability.yml` — requires the main stack to be running first. Full setup procedure: [docs/DEPLOYMENT.md §4](./docs/DEPLOYMENT.md#4-logs--observability).
| Service | Container | Default Port | Purpose |
|---------|-----------|-------------|---------|
| Grafana | `obs-grafana` | 3003 | Metrics / logs / traces dashboard |
| Prometheus | `obs-prometheus` | 9090 (dev only — `127.0.0.1` bound) | Metrics store |
| Loki | `obs-loki` | — (internal) | Log store |
| Tempo | `obs-tempo` | — (internal) | Trace store |
| GlitchTip | `obs-glitchtip` | 3002 | Error tracking (Sentry-compatible) |
### Observability env vars
| Variable | Purpose |
|----------|---------|
| `PORT_GRAFANA` | Host port for Grafana UI (default: `3003`) |
| `PORT_GLITCHTIP` | Host port for GlitchTip UI (default: `3002`) |
| `PORT_PROMETHEUS` | Host port for Prometheus UI (default: `9090`) |
| `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` login password — generate with `openssl rand -hex 32` |
| `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` |
| `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (email links, CORS), e.g. `https://glitchtip.example.com` |
| `SENTRY_DSN` | GlitchTip/Sentry DSN for the backend (Spring Boot) — leave empty to disable |
| `VITE_SENTRY_DSN` | GlitchTip/Sentry DSN for the frontend (SvelteKit) — injected at build time via Vite |
## Observability
→ See [docs/OBSERVABILITY.md](./docs/OBSERVABILITY.md) — where to look for logs, traces, metrics, and errors.
## API Testing ## API Testing
HTTP test files are in `backend/api_tests/` for use with the VS Code REST Client extension. HTTP test files are in `backend/api_tests/` for use with the VS Code REST Client extension.

View File

@@ -48,11 +48,6 @@
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId> <artifactId>spring-boot-starter-actuator</artifactId>
</dependency> </dependency>
<!-- Spring Boot 4.0 splits Micrometer metrics export (incl. Prometheus scrape endpoint) into its own starter -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-micrometer-metrics</artifactId>
</dependency>
<dependency> <dependency>
<groupId>org.springframework.boot</groupId> <groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-validation</artifactId> <artifactId>spring-boot-starter-validation</artifactId>

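Whether the Prometheus registry still reaches the classpath after dropping the starter can be checked from the dependency tree — a sketch, assuming the repo ships the standard Maven wrapper:

```bash
# Lists every io.micrometer artifact the backend resolves; if the scrape
# endpoint is expected to work, micrometer-registry-prometheus should appear.
./mvnw dependency:tree -Dincludes=io.micrometer
```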
View File

@@ -3,16 +3,13 @@ package org.raddatz.familienarchiv.security;
import lombok.RequiredArgsConstructor; import lombok.RequiredArgsConstructor;
import org.raddatz.familienarchiv.user.CustomUserDetailsService; import org.raddatz.familienarchiv.user.CustomUserDetailsService;
import jakarta.servlet.http.HttpServletResponse;
import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Configuration;
import org.springframework.core.annotation.Order;
import org.springframework.core.env.Environment; import org.springframework.core.env.Environment;
import org.springframework.security.authentication.dao.DaoAuthenticationProvider; import org.springframework.security.authentication.dao.DaoAuthenticationProvider;
import org.springframework.security.config.Customizer; import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.web.builders.HttpSecurity; import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity; import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configurers.AbstractHttpConfigurer;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder; import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder; import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.web.SecurityFilterChain; import org.springframework.security.web.SecurityFilterChain;
@@ -37,28 +34,6 @@ public class SecurityConfig {
return authProvider; return authProvider;
} }
@Bean
@Order(1)
public SecurityFilterChain managementFilterChain(HttpSecurity http) throws Exception {
http
.securityMatcher("/actuator/**")
.authorizeHttpRequests(auth -> {
// Health and Prometheus are open — Docker health checks and Prometheus scraping need no credentials.
auth.requestMatchers("/actuator/health", "/actuator/prometheus").permitAll();
// All other actuator endpoints (metrics, info, env, heapdump…) require authentication.
auth.anyRequest().authenticated();
})
// Explicitly return 401 for any unauthenticated actuator request.
// Without this override, Spring Security's DelegatingAuthenticationEntryPoint
// would redirect browser-like clients to the form-login page (302 → /login),
// making it impossible to distinguish "not authenticated" from "not found" in tests.
.exceptionHandling(ex -> ex.authenticationEntryPoint(
(req, res, e) -> res.setStatus(HttpServletResponse.SC_UNAUTHORIZED)))
.formLogin(AbstractHttpConfigurer::disable)
.csrf(AbstractHttpConfigurer::disable);
return http.build();
}
@Bean @Bean
public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
http http
@@ -79,10 +54,8 @@ public class SecurityConfig {
.csrf(csrf -> csrf.disable()) .csrf(csrf -> csrf.disable())
.authorizeHttpRequests(auth -> { .authorizeHttpRequests(auth -> {
// Actuator endpoints are governed by managementFilterChain (@Order(1)) above. // Health endpoint must be open so CI/Docker health checks work without credentials
// The permitAll() lines here are a belt-and-suspenders fallback in case any auth.requestMatchers("/actuator/health").permitAll();
// actuator path escapes that chain's securityMatcher. See docs/adr/017.
auth.requestMatchers("/actuator/health", "/actuator/prometheus").permitAll();
// Password reset endpoints are unauthenticated by nature // Password reset endpoints are unauthenticated by nature
auth.requestMatchers("/api/auth/forgot-password", "/api/auth/reset-password").permitAll(); auth.requestMatchers("/api/auth/forgot-password", "/api/auth/reset-password").permitAll();
// Invite-based registration endpoints are public // Invite-based registration endpoints are public

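The behavior the removed `@Order(1)` chain encodes can be probed with plain curl against the management port (assumed here to be `:8081`, per `application.yaml`):

```bash
# Open endpoints — expect 200 without credentials:
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8081/actuator/health      # 200
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8081/actuator/prometheus  # 200
# Every other actuator endpoint — expect a plain 401, not a 302 to /login:
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8081/actuator/metrics     # 401
```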
View File

@@ -49,8 +49,7 @@ management:
# Management port is separate from the app port so that: # Management port is separate from the app port so that:
# (a) Caddy never proxies /actuator/* (it only routes :8080 → the app port) # (a) Caddy never proxies /actuator/* (it only routes :8080 → the app port)
# (b) Prometheus scrapes backend:8081 directly inside archiv-net, not via Caddy # (b) Prometheus scrapes backend:8081 directly inside archiv-net, not via Caddy
# Note: in Spring Boot 4.0 the management port shares the security filter chain; /actuator/health # (c) Spring Security's session-authenticated filter chain on :8080 never sees actuator requests
# and /actuator/prometheus must be explicitly permitted in SecurityConfig — see SecurityConfig.java.
port: 8081 port: 8081
endpoints: endpoints:
web: web:
@@ -59,16 +58,6 @@ management:
endpoint: endpoint:
prometheus: prometheus:
enabled: true enabled: true
# Spring Boot 4.0: metrics export is disabled by default — explicitly opt in for Prometheus
prometheus:
metrics:
export:
enabled: true
metrics:
tags:
# Common tag applied to every metric so Grafana's Spring Boot dashboard can filter by application name.
# Override via MANAGEMENT_METRICS_TAGS_APPLICATION env var.
application: ${spring.application.name}
health: health:
mail: mail:
enabled: false enabled: false
@@ -77,18 +66,13 @@ management:
probability: 1.0 # 100% in dev; override via MANAGEMENT_TRACING_SAMPLING_PROBABILITY in prod compose probability: 1.0 # 100% in dev; override via MANAGEMENT_TRACING_SAMPLING_PROBABILITY in prod compose
# OpenTelemetry trace export — failures are non-fatal (app starts cleanly without Tempo running) # OpenTelemetry trace export — failures are non-fatal (app starts cleanly without Tempo running)
# Port 4318 = OTLP HTTP (the default transport for Spring Boot's HttpExporter). # The default http://localhost:4317 ensures CI compatibility when no observability stack is present.
# Port 4317 is gRPC-only; sending HTTP/1.1 to it produces "Connection reset".
otel: otel:
service: service:
name: familienarchiv-backend name: familienarchiv-backend
exporter: exporter:
otlp: otlp:
endpoint: ${OTEL_EXPORTER_OTLP_ENDPOINT:http://localhost:4318} endpoint: ${OTEL_EXPORTER_OTLP_ENDPOINT:http://localhost:4317}
logs:
exporter: none # Promtail captures Docker logs; disable OTLP log export (Tempo only accepts traces)
metrics:
exporter: none # Prometheus scrapes /actuator/prometheus; disable OTLP metric export to Tempo
springdoc: springdoc:
api-docs: api-docs:

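The 4318-vs-4317 distinction in the removed comment is straightforward to confirm by hand — a sketch, assuming Tempo's ports are reachable from the shell:

```bash
# OTLP HTTP receiver (4318): a well-formed empty POST should return 200.
curl -s -o /dev/null -w '%{http_code}\n' -X POST \
  -H 'Content-Type: application/json' -d '{}' \
  http://localhost:4318/v1/traces
# OTLP gRPC port (4317): speaking HTTP/1.1 to it fails at the transport level —
# the "Connection reset" the comment warns about.
curl -s -X POST -d '{}' http://localhost:4317/v1/traces || echo 'connection reset'
```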
View File

@@ -1,63 +0,0 @@
package org.raddatz.familienarchiv;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.web.server.LocalManagementPort;
import org.springframework.context.annotation.Import;
import org.springframework.http.ResponseEntity;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.bean.override.mockito.MockitoBean;
import org.springframework.web.client.DefaultResponseErrorHandler;
import org.springframework.web.client.RestTemplate;
import software.amazon.awssdk.services.s3.S3Client;
import java.io.IOException;
import static org.assertj.core.api.Assertions.assertThat;
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
@ActiveProfiles("test")
@Import(PostgresContainerConfig.class)
class ActuatorPrometheusIT {
@LocalManagementPort
private int managementPort;
@MockitoBean
S3Client s3Client;
@Test
void prometheus_endpoint_returns_200_without_credentials() {
ResponseEntity<String> response = noThrowTemplate().getForEntity(
"http://localhost:" + managementPort + "/actuator/prometheus", String.class);
assertThat(response.getStatusCode().value()).isEqualTo(200);
}
@Test
void prometheus_endpoint_returns_jvm_metrics() {
ResponseEntity<String> response = noThrowTemplate().getForEntity(
"http://localhost:" + managementPort + "/actuator/prometheus", String.class);
assertThat(response.getBody()).contains("jvm_memory_used_bytes");
}
@Test
void actuator_metrics_requires_authentication() {
ResponseEntity<String> response = noThrowTemplate().getForEntity(
"http://localhost:" + managementPort + "/actuator/metrics", String.class);
assertThat(response.getStatusCode().value()).isEqualTo(401);
}
private RestTemplate noThrowTemplate() {
RestTemplate template = new RestTemplate();
template.setErrorHandler(new DefaultResponseErrorHandler() {
@Override
public boolean hasError(org.springframework.http.client.ClientHttpResponse response) throws IOException {
return false;
}
});
return template;
}
}

View File

@@ -142,11 +142,10 @@ services:
container_name: obs-grafana container_name: obs-grafana
restart: unless-stopped restart: unless-stopped
ports: ports:
- "127.0.0.1:${PORT_GRAFANA:-3003}:3000" - "127.0.0.1:${PORT_GRAFANA:-3001}:3000"
environment: environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-changeme} GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-changeme}
GF_USERS_ALLOW_SIGN_UP: "false" GF_USERS_ALLOW_SIGN_UP: "false"
GF_SERVER_ROOT_URL: ${GF_SERVER_ROOT_URL:-http://localhost:3003}
volumes: volumes:
- grafana_data:/var/lib/grafana - grafana_data:/var/lib/grafana
- ./infra/observability/grafana/provisioning:/etc/grafana/provisioning:ro - ./infra/observability/grafana/provisioning:/etc/grafana/provisioning:ro
@@ -194,7 +193,7 @@ services:
obs-glitchtip-db-init: obs-glitchtip-db-init:
condition: service_completed_successfully condition: service_completed_successfully
environment: environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST:-archive-db}:5432/glitchtip DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@archive-db:5432/glitchtip
REDIS_URL: redis://obs-redis:6379/0 REDIS_URL: redis://obs-redis:6379/0
SECRET_KEY: ${GLITCHTIP_SECRET_KEY} SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
GLITCHTIP_DOMAIN: ${GLITCHTIP_DOMAIN:-http://localhost:3002} GLITCHTIP_DOMAIN: ${GLITCHTIP_DOMAIN:-http://localhost:3002}
@@ -202,13 +201,7 @@ services:
EMAIL_URL: smtp://mailpit:1025 EMAIL_URL: smtp://mailpit:1025
GLITCHTIP_MAX_EVENT_LIFE_DAYS: 90 GLITCHTIP_MAX_EVENT_LIFE_DAYS: 90
ports: ports:
- "127.0.0.1:${PORT_GLITCHTIP:-3002}:8000" - "127.0.0.1:${PORT_GLITCHTIP:-3002}:8080"
healthcheck:
test: ["CMD", "bash", "-c", "echo > /dev/tcp/localhost/8000"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
networks: networks:
- archiv-net - archiv-net
- obs-net - obs-net
@@ -222,7 +215,7 @@ services:
obs-redis: obs-redis:
condition: service_healthy condition: service_healthy
environment: environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST:-archive-db}:5432/glitchtip DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@archive-db:5432/glitchtip
REDIS_URL: redis://obs-redis:6379/0 REDIS_URL: redis://obs-redis:6379/0
SECRET_KEY: ${GLITCHTIP_SECRET_KEY} SECRET_KEY: ${GLITCHTIP_SECRET_KEY}
networks: networks:
@@ -236,10 +229,10 @@ services:
environment: environment:
PGPASSWORD: ${POSTGRES_PASSWORD} PGPASSWORD: ${POSTGRES_PASSWORD}
command: > command: >
sh -c "psql -h ${POSTGRES_HOST:-archive-db} -U ${POSTGRES_USER} -tc sh -c "psql -h archive-db -U ${POSTGRES_USER} -tc
\"SELECT 1 FROM pg_database WHERE datname = 'glitchtip'\" | \"SELECT 1 FROM pg_database WHERE datname = 'glitchtip'\" |
grep -q 1 || grep -q 1 ||
psql -h ${POSTGRES_HOST:-archive-db} -U ${POSTGRES_USER} -c \"CREATE DATABASE glitchtip;\"" psql -h archive-db -U ${POSTGRES_USER} -c \"CREATE DATABASE glitchtip;\""
networks: networks:
- archiv-net - archiv-net

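The removed `/dev/tcp` healthcheck can be exercised by hand the same way (container name as in the compose file; the image ships bash, which the healthcheck itself already relied on):

```bash
# Succeeds (exit 0) once GlitchTip's web process listens on its internal
# port 8000 — the same probe the healthcheck directive ran every 30s.
docker exec obs-glitchtip bash -c 'echo > /dev/tcp/localhost/8000 && echo listening'
```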
View File

@@ -213,11 +213,7 @@ services:
APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud} APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud}
SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true} SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true}
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true} SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true}
OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4318 OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4317
OTEL_LOGS_EXPORTER: none
OTEL_METRICS_EXPORTER: none
MANAGEMENT_METRICS_TAGS_APPLICATION: Familienarchiv
MANAGEMENT_TRACING_SAMPLING_PROBABILITY: ${MANAGEMENT_TRACING_SAMPLING_PROBABILITY:-0.1}
networks: networks:
- archiv-net - archiv-net
healthcheck: healthcheck:

View File

@@ -43,7 +43,7 @@ graph TD
- SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not. - SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not.
- The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy. - The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy.
- Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001). - Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001).
- An optional observability stack (Prometheus, Node Exporter, cAdvisor, Loki, Tempo, Grafana, GlitchTip) runs as a separate compose file. Configuration lives under `infra/observability/`. In production and CI, the stack is managed from `/opt/familienarchiv/` (CI copies it there on every nightly run) so bind mounts survive workspace wipes — see §4 for the ops procedure. - An optional observability stack (Prometheus, Node Exporter, cAdvisor) runs as a separate compose file: `docker compose -f docker-compose.observability.yml up -d`. It joins `archiv-net` and scrapes the backend's management port (`:8081`). Configuration lives under `infra/observability/`.
### OCR memory requirements ### OCR memory requirements
@@ -107,12 +107,8 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| `MAIL_SMTP_AUTH` | SMTP auth enabled | `false` (dev) | YES (prod) | — | | `MAIL_SMTP_AUTH` | SMTP auth enabled | `false` (dev) | YES (prod) | — |
| `MAIL_STARTTLS_ENABLE` | STARTTLS enabled | `false` (dev) | YES (prod) | — | | `MAIL_STARTTLS_ENABLE` | STARTTLS enabled | `false` (dev) | YES (prod) | — |
| `SPRING_PROFILES_ACTIVE` | Spring profile | `dev,e2e` (compose) | YES | — | | `SPRING_PROFILES_ACTIVE` | Spring profile | `dev,e2e` (compose) | YES | — |
| `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP HTTP endpoint for distributed traces (Tempo). Port 4318 = HTTP transport; port 4317 is gRPC-only and causes "Connection reset" with Spring Boot's HttpExporter. | `http://localhost:4318` | — | — | | `OTEL_EXPORTER_OTLP_ENDPOINT` | OTLP gRPC endpoint for distributed traces (Tempo). Set to `http://tempo:4317` via compose. | `http://localhost:4317` | — | — |
| `OTEL_LOGS_EXPORTER` | Disable OTLP log export — Promtail captures Docker logs via the logging driver; Tempo does not accept logs. | `none` | — | — |
| `OTEL_METRICS_EXPORTER` | Disable OTLP metric export — Prometheus scrapes `/actuator/prometheus` via pull model; Tempo does not accept metrics. | `none` | — | — |
| `MANAGEMENT_METRICS_TAGS_APPLICATION` | Common tag added to every Micrometer metric. Required by Grafana's Spring Boot Observability dashboard (ID 17175) `label_values(application)` template variable. | `Familienarchiv` | — | — |
| `MANAGEMENT_TRACING_SAMPLING_PROBABILITY` | Micrometer tracing sample rate; overridden to `0.0` in test profile. | `0.1` (compose) / `1.0` (dev) | — | — | | `MANAGEMENT_TRACING_SAMPLING_PROBABILITY` | Micrometer tracing sample rate; overridden to `0.0` in test profile. | `0.1` (compose) / `1.0` (dev) | — | — |
| `SENTRY_DSN` | GlitchTip / Sentry DSN for backend error reporting. Leave empty to disable the SDK. Set after GlitchTip first-run (§4). | — | — | YES |
### PostgreSQL container ### PostgreSQL container
@@ -146,13 +142,11 @@ All vars are set in `.env` at the repo root (copy from `.env.example`). The back
| Variable | Purpose | Default | Required? | Sensitive? | | Variable | Purpose | Default | Required? | Sensitive? |
|---|---|---|---|---| |---|---|---|---|---|
| `PORT_PROMETHEUS` | Host port for the Prometheus UI (bound to `127.0.0.1` only) | `9090` | — | — | | `PORT_PROMETHEUS` | Host port for the Prometheus UI (bound to `127.0.0.1` only) | `9090` | — | — |
| `PORT_GRAFANA` | Host port for the Grafana UI (bound to `127.0.0.1` only) | `3003` | — | — | | `PORT_GRAFANA` | Host port for the Grafana UI (bound to `127.0.0.1` only) | `3001` | — | — |
| `POSTGRES_HOST` | PostgreSQL hostname for GlitchTip's db-init job and workers. Override when only the staging stack is running and `archive-db` is not resolvable by that name. | `archive-db` | — | — |
| `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` user password | `changeme` | YES (prod) | YES | | `GRAFANA_ADMIN_PASSWORD` | Grafana `admin` user password | `changeme` | YES (prod) | YES |
| `PORT_GLITCHTIP` | Host port for the GlitchTip UI (bound to `127.0.0.1` only) | `3002` | — | — | | `PORT_GLITCHTIP` | Host port for the GlitchTip UI (bound to `127.0.0.1` only) | `3002` | — | — |
| `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (used in email links and CORS) | `http://localhost:3002` | YES (prod) | — | | `GLITCHTIP_DOMAIN` | Public-facing base URL for GlitchTip (used in email links and CORS) | `http://localhost:3002` | YES (prod) | — |
| `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` | — | YES | YES | | `GLITCHTIP_SECRET_KEY` | Django secret key for GlitchTip — generate with `python3 -c "import secrets; print(secrets.token_hex(32))"` | — | YES | YES |
| `VITE_SENTRY_DSN` | GlitchTip/Sentry DSN for the frontend (SvelteKit) — injected at build time via Vite. Leave empty to disable. Set after GlitchTip first-run (§4). | — | — | YES |
--- ---
@@ -199,29 +193,6 @@ curl -fsSL https://tailscale.com/install.sh | sh && tailscale up
# files to disk during execution (cleaned up unconditionally on completion). # files to disk during execution (cleaned up unconditionally on completion).
# A multi-tenant runner would need to switch to stdin-piped env files. # A multi-tenant runner would need to switch to stdin-piped env files.
# (See https://docs.gitea.com/usage/actions/quickstart for the register step.) # (See https://docs.gitea.com/usage/actions/quickstart for the register step.)
# Runner workspace directory — required for DooD bind-mount resolution (ADR-015).
# act_runner stores job workspaces here so that docker compose bind mounts resolve
# to real host paths. The path must be identical on the host and inside job containers.
mkdir -p /srv/gitea-workspace
# Observability config permanent directory — the nightly CI job copies
# docker-compose.observability.yml and infra/observability/ here on every run.
# The obs stack is always started from this path, not from the workspace.
# See ADR-016 for why this directory is used instead of a server-pull approach.
mkdir -p /opt/familienarchiv/infra
# Both paths must also appear in the runner service volumes in ~/docker/gitea/compose.yaml:
# volumes:
# - /srv/gitea-workspace:/srv/gitea-workspace
# /opt/familienarchiv does NOT need to be in the runner container's volumes — job
# containers are spawned by the host daemon directly (DooD), so the host path is
# accessible to them as long as runner-config.yaml lists it in valid_volumes + options.
# See runner-config.yaml (workdir_parent + valid_volumes + options) and ADR-015/016.
# ⚠ IMPORTANT: after any change to runner-config.yaml (valid_volumes, options, workdir_parent),
# restart the Gitea Act runner for the new config to take effect:
# docker restart gitea-runner
# Until restarted, job containers are spawned with the old config and any new bind mounts
# (e.g. /opt/familienarchiv) will not be available inside job steps.
``` ```
### 3.2 DNS records ### 3.2 DNS records
@@ -255,7 +226,6 @@ git.raddatz.cloud A <server IP>
| `GRAFANA_ADMIN_PASSWORD` | both | Grafana `admin` login — generate a strong password | | `GRAFANA_ADMIN_PASSWORD` | both | Grafana `admin` login — generate a strong password |
| `GLITCHTIP_SECRET_KEY` | both | Django secret key — `openssl rand -hex 32` | | `GLITCHTIP_SECRET_KEY` | both | Django secret key — `openssl rand -hex 32` |
| `SENTRY_DSN` | both | GlitchTip project DSN — set after first-run (§4); leave empty to keep Sentry disabled | | `SENTRY_DSN` | both | GlitchTip project DSN — set after first-run (§4); leave empty to keep Sentry disabled |
| `VITE_SENTRY_DSN` | both | GlitchTip frontend project DSN — set after first-run (§4); leave empty to keep Sentry disabled |
### 3.4 First deploy ### 3.4 First deploy
@@ -283,9 +253,6 @@ Before the first deploy: rotate `PROD_APP_ADMIN_PASSWORD` to a strong value. Aft
## 4. Logs + observability ## 4. Logs + observability
> **Developer guide (where to look for what, LogQL queries, trace exploration) → [docs/OBSERVABILITY.md](./OBSERVABILITY.md).**
> This section covers the ops side: starting the stack, env vars, and CI wiring.
### First-response commands ### First-response commands
```bash ```bash
@@ -308,70 +275,13 @@ docker compose logs --tail=200 <service>
### Observability stack ### Observability stack
An observability stack is available via `docker-compose.observability.yml`. Configuration lives under `infra/observability/`. An observability stack is available via `docker-compose.observability.yml`. Configuration lives under `infra/observability/`. Start it after the main stack is up (which creates `archiv-net`):
#### Dev — start from the workspace
```bash ```bash
docker compose up -d # creates archiv-net docker compose up -d # creates archiv-net
docker compose -f docker-compose.observability.yml up -d docker compose -f docker-compose.observability.yml up -d
``` ```
#### Why the obs stack is managed differently from the main app stack
The main app stack (`docker-compose.prod.yml`) has no config-file bind mounts — its containers read config from env vars and image defaults. The workspace is wiped after each CI run but that does not affect running containers, because they hold no references to workspace paths.
The obs stack is different: `prometheus.yml`, `tempo.yml`, Loki config, Grafana provisioning files, and Promtail config are all bind-mounted from the host filesystem into their containers. If those source paths disappear (workspace wipe), the running containers keep working — and even survive restarts — but the next `docker compose up` fails: Docker re-resolves the bind-mount source, and the workspace path no longer exists.
The fix is to keep the obs compose file and config tree at a **permanent path** that survives between runs and that CI refreshes on every run: `/opt/familienarchiv/` (see ADR-016).
#### Production — managed from `/opt/familienarchiv/`
Every CI run (nightly + release) copies `docker-compose.observability.yml` and `infra/observability/` to `/opt/familienarchiv/` before starting the stack. Bind mounts then resolve to `/opt/familienarchiv/infra/observability/…` — a stable path that outlasts any workspace wipe.
**Environment variables** follow the same two-source model as the main stack:
| Source | What it contains | Managed by |
|---|---|---|
| `infra/observability/obs.env` | All non-secret config (ports, URLs, hostnames) | Git — reviewed in PRs |
| `/opt/familienarchiv/obs-secrets.env` | Passwords and secret keys only | CI — written fresh from Gitea secrets on every deploy |
Both files are passed explicitly via `--env-file` to the compose command, so there is no implicit auto-read `.env` and no operator-managed file to keep in sync.
**Non-secret config** (`infra/observability/obs.env`):
| Key | Value | Notes |
|---|---|---|
| `PORT_GRAFANA` | `3003` | Avoids collision with staging frontend on port 3001 |
| `PORT_GLITCHTIP` | `3002` | |
| `PORT_PROMETHEUS` | `9090` | |
| `GF_SERVER_ROOT_URL` | `https://grafana.archiv.raddatz.cloud` | Required for alert email links and OAuth redirects |
| `GLITCHTIP_DOMAIN` | `https://glitchtip.archiv.raddatz.cloud` | Must match the Caddy vhost |
| `POSTGRES_HOST` | `archive-db` | Override if only the staging stack is running |
**Secret keys** (set in Gitea secrets, injected by CI into `obs-secrets.env`):
| Gitea secret | Notes |
|---|---|
| `GRAFANA_ADMIN_PASSWORD` | Strong unique password; shared by nightly and release |
| `GLITCHTIP_SECRET_KEY` | `openssl rand -hex 32`; shared by nightly and release |
| `STAGING_POSTGRES_PASSWORD` / `PROD_POSTGRES_PASSWORD` | Must match the running PostgreSQL container |
To start or restart the obs stack manually on the server (after CI has run at least once):
```bash
docker compose \
-f /opt/familienarchiv/docker-compose.observability.yml \
--env-file /opt/familienarchiv/infra/observability/obs.env \
--env-file /opt/familienarchiv/obs-secrets.env \
up -d --wait --remove-orphans
```
> **Note (manual ops only):** CI clears the destination with `rm -rf` before copying, so deleted files are removed automatically on the next run. If you copy manually with `cp -r` without first removing the directory, stale files from deleted configs will persist until cleaned up:
> ```bash
> rm /opt/familienarchiv/infra/observability/<path-to-removed-file>
> ```
Current services: Current services:
| Service | Image | Purpose | | Service | Image | Purpose |
@@ -380,11 +290,11 @@ Current services:
| `obs-node-exporter` | `prom/node-exporter:v1.9.0` | Host-level CPU / memory / disk / network metrics | | `obs-node-exporter` | `prom/node-exporter:v1.9.0` | Host-level CPU / memory / disk / network metrics |
| `obs-cadvisor` | `gcr.io/cadvisor/cadvisor:v0.52.1` | Per-container resource metrics | | `obs-cadvisor` | `gcr.io/cadvisor/cadvisor:v0.52.1` | Per-container resource metrics |
| `obs-loki` | `grafana/loki:3.4.2` | Log aggregation — receives log streams from Promtail. Port 3100 is `expose`-only (not host-bound). | | `obs-loki` | `grafana/loki:3.4.2` | Log aggregation — receives log streams from Promtail. Port 3100 is `expose`-only (not host-bound). |
| `obs-promtail` | `grafana/promtail:3.4.2` | Log shipping agent — reads all Docker container logs via the Docker socket and forwards them to Loki with `container_name`, `compose_service`, `compose_project`, and `job` labels. The `job` label is mapped from the Docker Compose service name (`com.docker.compose.service`) so that Grafana Loki dashboard queries (`{job="backend"}`, `{job="frontend"}`) work out of the box and the "App" variable dropdown is populated. | | `obs-promtail` | `grafana/promtail:3.4.2` | Log shipping agent — reads all Docker container logs via the Docker socket and forwards them to Loki with `container_name`, `compose_service`, and `compose_project` labels |
| `obs-tempo` | `grafana/tempo:2.7.2` | Distributed trace storage — OTLP HTTP receiver on port 4318 (`archiv-net`-internal; backend sends traces here). Grafana queries traces on port 3200 (`obs-net`-internal). All ports are `expose`-only (not host-bound). | | `obs-tempo` | `grafana/tempo:2.7.2` | Distributed trace storage — OTLP gRPC receiver on port 4317, OTLP HTTP on port 4318 (both `archiv-net`-internal). Grafana queries traces on port 3200 (`obs-net`-internal). All ports are `expose`-only (not host-bound). |
| `obs-grafana` | `grafana/grafana-oss:11.6.1` | Unified observability UI — metrics dashboards, log exploration, trace viewer. Bound to `127.0.0.1:${PORT_GRAFANA:-3003}` on the host. | | `obs-grafana` | `grafana/grafana-oss:11.6.1` | Unified observability UI — metrics dashboards, log exploration, trace viewer. Bound to `127.0.0.1:${PORT_GRAFANA:-3001}` on the host. |
| `obs-glitchtip` | `glitchtip/glitchtip:6.1.6` | Sentry-compatible error tracker. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces. Bound to `127.0.0.1:${PORT_GLITCHTIP:-3002}`. | | `obs-glitchtip` | `glitchtip/glitchtip:v4` | Sentry-compatible error tracker. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces. Bound to `127.0.0.1:${PORT_GLITCHTIP:-3002}`. |
| `obs-glitchtip-worker` | `glitchtip/glitchtip:6.1.6` | Celery + beat worker — processes async GlitchTip tasks (event ingestion, notifications, cleanup). | | `obs-glitchtip-worker` | `glitchtip/glitchtip:v4` | Celery + beat worker — processes async GlitchTip tasks (event ingestion, notifications, cleanup). |
| `obs-redis` | `redis:7-alpine` | Celery task broker for GlitchTip. Internal to `obs-net`; no host port exposed. | | `obs-redis` | `redis:7-alpine` | Celery task broker for GlitchTip. Internal to `obs-net`; no host port exposed. |
| `obs-glitchtip-db-init` | `postgres:16-alpine` | One-shot init container. Creates the `glitchtip` database on the existing `archive-db` PostgreSQL instance if it does not already exist. Runs at stack startup; exits cleanly once done. | | `obs-glitchtip-db-init` | `postgres:16-alpine` | One-shot init container. Creates the `glitchtip` database on the existing `archive-db` PostgreSQL instance if it does not already exist. Runs at stack startup; exits cleanly once done. |
@@ -392,7 +302,7 @@ Current services:
| Item | Value | | Item | Value |
|---|---| |---|---|
| URL | `http://localhost:3003` (or `http://localhost:$PORT_GRAFANA`) | | URL | `http://localhost:3001` (or `http://localhost:$PORT_GRAFANA`) |
| Username | `admin` | | Username | `admin` |
| Password | `$GRAFANA_ADMIN_PASSWORD` (default: `changeme`**change before exposing to a network**) | | Password | `$GRAFANA_ADMIN_PASSWORD` (default: `changeme`**change before exposing to a network**) |
@@ -422,7 +332,7 @@ docker exec obs-loki wget -qO- \
**Prefer `compose_service` over `container_name` in LogQL queries**`container_name` differs between dev (`archive-backend`) and prod (`archiv-production-backend-1`), while `compose_service` is stable (`backend`, `db`, `minio`, etc.). **Prefer `compose_service` over `container_name` in LogQL queries**`container_name` differs between dev (`archive-backend`) and prod (`archiv-production-backend-1`), while `compose_service` is stable (`backend`, `db`, `minio`, etc.).
Prometheus port `9090` and Grafana port `3003` (default; configurable via `PORT_GRAFANA`) are bound to `127.0.0.1` on the host. No other observability ports are host-bound. Prometheus port `9090` and Grafana port `3001` are bound to `127.0.0.1` on the host. No other observability ports are host-bound.
#### GlitchTip #### GlitchTip

View File

@@ -1,180 +0,0 @@
# Observability Guide
> **Ops reference (starting the stack, env vars, CI wiring) → [DEPLOYMENT.md §4](./DEPLOYMENT.md#4-logs--observability).**
> This file is for developers: what signal lives where, how to reach it, and what to look for.
## Where to look for what
| I want to… | Go to |
|---|---|
| See the last N log lines from the backend | `docker compose logs --tail=100 backend` |
| Search logs by keyword across time | Grafana → Explore → Loki |
| Understand why an HTTP request failed | Grafana → Explore → Loki → filter by `traceId` → follow link to Tempo |
| See a full distributed trace (DB queries, HTTP calls) | Grafana → Explore → Tempo → search by service or trace ID |
| Check JVM heap / GC / thread count | Grafana → Dashboards → Spring Boot Observability |
| Check HTTP error rate or p95 latency | Grafana → Dashboards → Spring Boot Observability |
| Check host CPU / memory / disk | Grafana → Dashboards → Node Exporter Full |
| See grouped application errors with stack traces | GlitchTip |
| Check if the backend is healthy | `curl http://localhost:8081/actuator/health` (on the server) |
| Check what Prometheus is scraping | `curl http://localhost:9090/api/v1/targets` (on the server) |
## Access
| Tool | External URL | Who it's for |
|---|---|---|
| Grafana | `https://grafana.archiv.raddatz.cloud` | Logs, metrics, traces — the primary observability UI |
| GlitchTip | `https://glitchtip.archiv.raddatz.cloud` | Grouped errors with stack traces and release tracking |
Loki, Tempo, and Prometheus have no external URL. They are internal services, accessible only through Grafana (or via SSH tunnel — see below).
## Logs (Loki)
Logs reach Loki via Promtail, which reads all Docker container logs from the Docker socket and ships them with labels derived from Docker Compose metadata.
### Labels available in every log line
| Label | What it contains | Example |
|---|---|---|
| `job` | Compose service name | `backend`, `frontend`, `db` |
| `compose_service` | Same as `job` | `backend` |
| `compose_project` | Compose project name | `archiv-staging`, `archiv-production` |
| `container_name` | Docker container name | `archiv-staging-backend-1` |
| `filename` | Docker log source | `/var/lib/docker/containers/…` |
**Use `job` in LogQL queries** — it is stable across dev, staging, and production. `container_name` changes between environments.
### Common LogQL queries in Grafana Explore
```logql
# All backend logs
{job="backend"}
# Backend ERROR and WARN lines only
{job="backend"} |~ "ERROR|WARN"
# All logs for a specific request (paste a traceId from a log line)
{job="backend"} |= "3fa85f64-5717-4562-b3fc-2c963f66afa6"
# Log lines containing a specific exception class
{job="backend"} |~ "DomainException|NullPointerException"
# Frontend logs
{job="frontend"}
# Database (slow query log, if enabled)
{job="db"}
```
### Log → Trace correlation
Spring Boot writes the active `traceId` into every log line when a request is being processed:
```
2026-05-16 ... INFO [Familienarchiv,3fa85f64...,1b2c3d4e] o.r.f.document.DocumentService : ...
```
In Grafana Explore → Loki, log lines with a `traceId` field show a **Tempo** link. Clicking it opens the full trace in Explore → Tempo without copying and pasting IDs.
This linking is configured in the Loki datasource provisioning via the `traceId` derived field regex. No manual setup required.
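For reference, a minimal sketch of what that provisioning can look like — the file path, datasource UID, and regex here are assumptions for illustration, not the repo's actual file:

```bash
# Hypothetical Loki datasource provisioning with a traceId derived field.
cat > infra/observability/grafana/provisioning/datasources/loki.yml <<'EOF'
apiVersion: 1
datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://obs-loki:3100
    jsonData:
      derivedFields:
        - name: traceId
          # Matches the trace id in Spring Boot's [app,traceId,spanId] pattern
          matcherRegex: '\[\w+,(\w+),\w+\]'
          datasourceUid: tempo        # must match the Tempo datasource uid
          url: '${__value.raw}'       # the matched trace id becomes the query
EOF
```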
## Traces (Tempo)
The backend sends traces to Tempo via OTLP HTTP (port 4318). Every inbound HTTP request and every JPA query produces a span. Spans are linked into traces by the propagated `traceId` header.
### Finding a trace in Grafana
**Option A — from a log line:**
1. Grafana → Explore → select *Loki* datasource
2. Query `{job="backend"}` and find the failing request
3. Click the **Tempo** link in the log line (appears when `traceId` is present)
**Option B — by service:**
1. Grafana → Explore → select *Tempo* datasource
2. Query type: **Search**
3. Service name: `familienarchiv-backend`
4. Filter by HTTP status, duration, or operation name as needed
**Option C — by trace ID:**
1. Grafana → Explore → select *Tempo* datasource
2. Query type: **TraceQL** or **Trace ID**
3. Paste the trace ID
### What each span type tells you
| Root span name pattern | What it covers |
|---|---|
| `GET /api/documents`, `POST /api/documents` | Full HTTP request lifecycle |
| `SELECT archiv.*` | A single JPA/JDBC query inside that request |
| `HikariPool.getConnection` | Connection pool wait time |
A slow `SELECT` span inside an otherwise fast HTTP span pinpoints a missing index. A slow `HikariPool.getConnection` span indicates connection pool exhaustion.
### Sampling rate
- **Dev**: 100% of requests are traced (`management.tracing.sampling.probability: 1.0` in `application.yaml`)
- **Staging / Production**: 10% (`MANAGEMENT_TRACING_SAMPLING_PROBABILITY=0.1` in `docker-compose.prod.yml`)
To find a trace for a specific request in staging/production, either increase the sampling rate temporarily or trigger the request multiple times.
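A temporary rate increase is a one-liner, since `docker-compose.prod.yml` substitutes the variable at container start (a sketch — remember to revert afterwards):

```bash
# Recreates the backend with 100% sampling; drop the override and re-run
# `up -d backend` to return to the 10% default.
MANAGEMENT_TRACING_SAMPLING_PROBABILITY=1.0 \
  docker compose -f docker-compose.prod.yml up -d backend
```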
## Metrics (Prometheus → Grafana)
Prometheus scrapes the backend management endpoint every 15 s:
```
Target: backend:8081/actuator/prometheus
Labels: job="spring-boot", application="Familienarchiv"
```
All Spring Boot metrics carry the `application="Familienarchiv"` tag, which is how the Grafana Spring Boot Observability dashboard (ID 17175) filters to this service.
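A minimal sketch of the scrape job described above — validated with `promtool` rather than written over the real file (which lives under `infra/observability/prometheus/` and carries more jobs):

```bash
cat > /tmp/prometheus-sketch.yml <<'EOF'
global:
  scrape_interval: 15s          # the 15 s pull described above
scrape_configs:
  - job_name: spring-boot
    metrics_path: /actuator/prometheus
    static_configs:
      - targets: ["backend:8081"]
EOF
docker run --rm -v /tmp/prometheus-sketch.yml:/p.yml \
  --entrypoint promtool prom/prometheus check config /p.yml
```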
### Useful Prometheus queries (run on the server or via Grafana Explore → Prometheus)
```promql
# HTTP error rate (5xx) as a fraction of all requests
sum(rate(http_server_requests_seconds_count{status=~"5.."}[5m]))
/ sum(rate(http_server_requests_seconds_count[5m]))
# p95 response time
histogram_quantile(0.95, sum by (le) (
rate(http_server_requests_seconds_bucket[5m])
))
# JVM heap used
jvm_memory_used_bytes{area="heap", application="Familienarchiv"}
# Active DB connections
hikaricp_connections_active
```
## Errors (GlitchTip)
GlitchTip receives errors from both the backend (via Sentry Java SDK) and the frontend (via Sentry JavaScript SDK). It groups events by fingerprint, tracks first/last seen times, and links to the release that introduced the error.
GlitchTip complements Loki: use GlitchTip when you need **grouped, de-duplicated errors with stack traces and release attribution**; use Loki when you need **raw log lines with full context** or want to search across all log levels.
## Direct API access (debugging only)
Loki and Tempo bind no host ports. To reach them directly from your laptop, use an SSH tunnel through the server:
```bash
# Loki API on localhost:3100 (then query via curl or logcli)
ssh -L 3100:172.20.0.x:3100 root@raddatz.cloud
# Replace 172.20.0.x with the obs-loki container IP:
# docker inspect obs-loki --format '{{.NetworkSettings.Networks.archiv-obs-net.IPAddress}}'
# Tempo API on localhost:3200 (then query via curl or tempo-cli)
ssh -L 3200:172.20.0.x:3200 root@raddatz.cloud
```
In practice, Grafana Explore covers all common debugging workflows without needing direct API access.
## Signal summary
| Signal | Source | Transport | Storage | UI |
|---|---|---|---|---|
| Application logs | Spring Boot stdout → Docker log driver | Promtail → Loki push API | Loki | Grafana Explore → Loki |
| Distributed traces | Spring Boot OTel agent | OTLP HTTP → Tempo:4318 | Tempo | Grafana Explore → Tempo |
| JVM + HTTP metrics | Spring Actuator `/actuator/prometheus` | Prometheus pull (15 s) | Prometheus | Grafana dashboards |
| Host metrics | node-exporter | Prometheus pull | Prometheus | Grafana → Node Exporter Full |
| Container metrics | cAdvisor | Prometheus pull | Prometheus | Grafana (via Prometheus datasource) |
| Application errors | Sentry SDK | HTTP POST → GlitchTip ingest | GlitchTip DB | GlitchTip UI |

View File

@@ -1,69 +0,0 @@
# ADR-015: DooD workspace bind mount for Compose file bind-mount resolution
## Status
Accepted
## Context
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) run job steps inside Docker containers via Docker-out-of-Docker (DooD): the Gitea runner mounts the host Docker socket, and act_runner spawns sibling containers for each job.
When a job step calls `docker compose -f docker-compose.observability.yml up`, Docker Compose resolves relative bind-mount sources against `$(pwd)` inside the job container and passes the resulting absolute paths to the **host** daemon. For example, `./infra/observability/prometheus/prometheus.yml` becomes `/some/path/infra/observability/prometheus/prometheus.yml`, and the host daemon tries to bind-mount that path from the **host filesystem**.
In the default DooD setup (`runner-config.yaml` with only `valid_volumes: ["/var/run/docker.sock"]`), job container workspaces live in the act_runner overlay2 layer. The host has no corresponding directory at the job container's `$(pwd)` path, so the daemon auto-creates an empty directory in its place. The container then fails to start because the mount target was expected to be a file, not a directory:
```
error mounting "…/prometheus/prometheus.yml" to rootfs at "/etc/prometheus/prometheus.yml": not a directory
```
This affected all five config file bind mounts in `docker-compose.observability.yml`.
## Decision
Configure act_runner to store job workspaces on a real host path (`/srv/gitea-workspace`) and mount that path into both the runner container and every job container at the **same absolute path**. The identity of the host path and container path is the key constraint: Compose resolves to an absolute path and hands it to the host daemon, which looks for that exact path on the host filesystem.
**runner-config.yaml changes:**
```yaml
container:
workdir_parent: /srv/gitea-workspace
valid_volumes:
- "/var/run/docker.sock"
- "/srv/gitea-workspace"
options: "-v /srv/gitea-workspace:/srv/gitea-workspace"
```
**Runner compose.yaml change** (host side — not in this repo):
```yaml
runner:
volumes:
- /srv/gitea-workspace:/srv/gitea-workspace
```
With this in place, `$(pwd)` inside a job container resolves to `/srv/gitea-workspace/<owner>/<repo>/`, which is a real directory on the host. Compose-managed bind mounts from that directory work without any additional steps.
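The path-identity constraint is visible directly in Compose's rendered output — a sketch with a hypothetical `<owner>/<repo>` path:

```bash
cd /srv/gitea-workspace/owner/repo
# `config` normalizes short-form volumes to long syntax; the `source:` values
# it prints are the absolute host paths handed to the HOST daemon, so they
# must exist on the host — which workdir_parent now guarantees.
docker compose -f docker-compose.observability.yml config | grep -A2 'type: bind'
```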
## Alternatives Considered
| Alternative | Why rejected |
|---|---|
| **overlay2 `MergedDir` sync via privileged nsenter** (the previous approach, see PR #599 v1) | Required `--privileged --pid=host` (effective root on the host) plus fragile overlay2 driver assumption. Introduced stale-file risk on the host and a second stable path (`/srv/familienarchiv-*/obs-configs`) to maintain separately from the source tree. Replaced by this ADR. |
| **Build configs into a dedicated Docker image** (pattern used for MinIO bootstrap, see `infra/minio/Dockerfile`) | Viable for static files that change infrequently. Requires a build step and an image rebuild every time a config changes. Appropriate for bootstrap scripts; too heavy for frequently-tuned observability configs. |
| **Add workspace directory to runner-config `valid_volumes` only** (without `workdir_parent`) | `valid_volumes` whitelists paths that workflow steps may reference, but does not change where act_runner stores workspaces. Without `workdir_parent`, the workspace would still be in overlay2 and the bind-mount resolution problem would remain. |
| **Map workspace under a different host path than container path** (e.g. host `/srv/workspace`, container `/workspace`) | Compose resolves to the container-internal path (e.g. `/workspace/…`) and passes that to the host daemon. The host daemon interprets the source as a host path. If host `/workspace` does not exist, the daemon creates an empty directory — the original bug. The paths must be identical. |
## Consequences
- `/srv/gitea-workspace` must exist on the VPS before the runner starts. The directory was created as part of this change; it is not created automatically.
- The runner container's `compose.yaml` (maintained outside this repo at `~/docker/gitea/compose.yaml` on the VPS) must include the `- /srv/gitea-workspace:/srv/gitea-workspace` volume line. This is an out-of-band operational dependency; the prerequisite is documented in `runner-config.yaml`.
- `workdir_parent` applies to all jobs on this runner. Any future workflow that calls `docker compose` with relative bind mounts benefits automatically without further configuration.
- Job workspaces persist across runs under `/srv/gitea-workspace`. act_runner manages per-run subdirectory cleanup. Orphaned directories from interrupted runs should be cleaned up manually if disk space becomes a concern.
- Workflows that previously relied on the `OBS_CONFIG_DIR` env var or the `obs-configs` stable path on the host no longer need either; both were removed in this PR.
- This pattern does **not** apply to the `nsenter`-based Caddy reload step (ADR-012), which manages a host systemd service — a different problem class with no bind-mount equivalent.
## References
- ADR-011 — single-tenant runner trust model
- ADR-012 — nsenter via privileged container for host service management
- Issue #598 — original observability stack bind-mount failure
- `runner-config.yaml``workdir_parent`, `valid_volumes`, `options`

View File

@@ -1,57 +0,0 @@
# ADR-016: Observability stack co-location at `/opt/familienarchiv/` with CI-push config sync
## Status
Accepted
## Context
Issue #601 established that the observability stack must survive Gitea CI workspace wipes between nightly runs. When the nightly job completes, act_runner deletes the job workspace. Any Docker container that bind-mounts a config file from a workspace path (`/srv/gitea-workspace/…/infra/observability/prometheus/prometheus.yml`) then references a path that no longer exists on the host. On the next nightly run, Docker Compose either auto-creates an empty directory in its place (causing the container to fail to start because a file mount receives a directory) or finds a stale file from a previous run if the workspace happened to land at the same path.
ADR-015 solved the workspace bind-mount resolution problem: job workspaces are stored at `/srv/gitea-workspace` so `$(pwd)` inside the job container maps to a real host path. But it did not address persistence: the workspace is still wiped after the job, so bind mounts from workspace-relative paths remain fragile across runs.
### Decision drivers
1. Bind-mount sources must point to a host path that persists indefinitely, not to a path that disappears after each CI run.
2. Config files must reflect the committed state of the repo after every nightly run (no manual sync steps).
3. Secrets must not be written to the workspace or to any path managed by CI; they must survive independently of deployments.
4. The solution must not introduce new infrastructure dependencies (no SSH access from CI, no external registry, no additional server-side daemon).
### Alternatives considered
**A: Server-pull model** — a systemd timer or cron job on the server does `git pull` from the repo into `/opt/familienarchiv/` and then runs `docker compose up`. Rejected because: (1) requires git credentials on the server and a registered deploy key, (2) adds a second deployment mechanism that diverges from the CI-push model used for the main app stack, (3) timing coupling — the server pull must complete before CI's health checks run, requiring polling or a webhook.
**B: Separate directory (e.g. `/opt/obs/`)** — keeps obs configs isolated from the app stack. Rejected because: (1) the main app compose files are already in `/opt/familienarchiv/` (managed the same way), and (2) GlitchTip shares the `archive-db` PostgreSQL instance and `archiv-net` Docker network — it is architecturally part of the same deployment unit, not a separate one. Co-location reflects the actual coupling.
**C: Named Docker configs (Swarm)** — Docker Swarm supports first-class config objects that persist in the cluster. Rejected because the project does not use Swarm and introducing it solely for config persistence is a disproportionate dependency.
## Decision
The observability stack is co-located with the main application deployment at `/opt/familienarchiv/`:
- `docker-compose.observability.yml``/opt/familienarchiv/docker-compose.observability.yml`
- `infra/observability/``/opt/familienarchiv/infra/observability/`
Both the nightly CI job (`nightly.yml`) and the release job (`release.yml`) copy these files from the workspace checkout to `/opt/familienarchiv/` using `cp -r` on every run (CI-push model). Containers always read config from the permanent location; a workspace wipe has no effect on running containers.
Environment variables follow a two-source model:
- `infra/observability/obs.env` (git-tracked, non-secret): all non-sensitive config — host ports, public URLs (`GLITCHTIP_DOMAIN`, `GF_SERVER_ROOT_URL`), and the default `POSTGRES_HOST`. Changes go through PR review. No credentials.
- `/opt/familienarchiv/obs-secrets.env` (CI-written, per-deploy): passwords and secret keys only (`GRAFANA_ADMIN_PASSWORD`, `GLITCHTIP_SECRET_KEY`, `POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_HOST`), injected fresh from Gitea secrets on every nightly and release deploy. Gitea is the single source of truth for secrets — rotating a secret takes effect on the next deploy without manual server action.
Both files are passed explicitly via `--env-file` to every obs compose command (config dry-run and `up`); nothing relies on Compose's implicit `.env` auto-read. The required key inventory is documented in `docs/DEPLOYMENT.md §4`.
The CI runner mounts `/opt/familienarchiv` as a bind mount into job containers (see `runner-config.yaml`). This requires a one-time `mkdir -p /opt/familienarchiv/infra` on the server and a runner restart after updating `runner-config.yaml` (see ADR-015 and `docs/DEPLOYMENT.md §3.1`).
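Condensed, the per-deploy sequence looks roughly like this (a sketch; the authoritative steps live in `nightly.yml` / `release.yml`, and the secret values are placeholders expanded from Gitea secrets):
```bash
# 1. Push config from the workspace checkout to the permanent location.
cp -r docker-compose.observability.yml /opt/familienarchiv/
cp -r infra/observability /opt/familienarchiv/infra/

# 2. Rewrite the secrets file from scratch on every deploy (placeholders shown).
cat > /opt/familienarchiv/obs-secrets.env <<EOF
GRAFANA_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
GLITCHTIP_SECRET_KEY=${GLITCHTIP_SECRET_KEY}
POSTGRES_USER=${POSTGRES_USER}
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
POSTGRES_HOST=${POSTGRES_HOST}
EOF

# 3. Validate, then start; both env files are passed explicitly.
cd /opt/familienarchiv
docker compose --env-file infra/observability/obs.env \
               --env-file obs-secrets.env \
               -f docker-compose.observability.yml config -q
docker compose --env-file infra/observability/obs.env \
               --env-file obs-secrets.env \
               -f docker-compose.observability.yml up -d
```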
## Consequences
**Positive:**
- Bind-mount sources survive workspace wipes by definition — they are on a persistent host path.
- Config is always in sync with the repo after each nightly run.
- No new infrastructure dependencies; the CI-push model mirrors how the main app stack is deployed.
- Secret rotation requires no manual server action — Gitea secrets are the authoritative store; `obs-secrets.env` is rewritten from scratch on every deploy so a secret change takes effect on the next nightly or release run.
**Negative:**
- `cp -r` does not remove deleted files; a config file removed from the repo persists in `/opt/familienarchiv/infra/observability/` until manually deleted. Acceptable for this project's change frequency. A `rsync -a --delete` would give a clean mirror if this becomes a problem (see the sketch after this list).
- Mounting `/opt/familienarchiv/` into CI job containers expands the blast radius of a compromised workflow step — a malicious step could overwrite app compose files and Caddy config. Acceptable because the runner is single-tenant (trusted code only). See `runner-config.yaml` security comment.
- Runner must be restarted (`systemctl restart gitea-runner`) after any change to `runner-config.yaml` for the new mount to take effect.
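For reference, the clean-mirror variant mentioned in the first item above would be (a sketch):
```bash
# Mirror the repo's observability config exactly, deleting files that were
# removed from the repo (would replace the cp -r in the deploy step).
rsync -a --delete infra/observability/ /opt/familienarchiv/infra/observability/
```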

View File

@@ -1,48 +0,0 @@
# ADR-017: Spring Boot 4.0 management port shares the main security filter chain
## Status
Accepted
## Context
The Familienarchiv backend runs Spring Boot Actuator on a dedicated management port (8081) so that Caddy never proxies `/actuator/*` requests and Prometheus can reach the scrape endpoint directly inside `archiv-net`.
In earlier Spring Boot versions (< 4.0), the management server ran in an isolated child application context whose security was governed independently by `ManagementWebSecurityAutoConfiguration`. The main app's `SecurityConfig` filter chain (port 8080) never intercepted requests arriving on port 8081.
In Spring Boot 4.0 with Jetty, this isolation was removed. The management server now traverses the **same** Spring Security `FilterChainProxy` as the main application. Concretely:
- Any `SecurityFilterChain` bean in the application context is evaluated for requests arriving on the management port.
- There is no longer a separate "management security" child context.
This was discovered when Prometheus began receiving HTTP 401 responses from `/actuator/prometheus` despite the endpoint being exposed and the `micrometer-registry-prometheus` dependency being present. Prometheus rejected these responses with `received unsupported Content-Type "text/html"` because the main filter chain's form-login `DelegatingAuthenticationEntryPoint` was redirecting unauthenticated requests to `/login` (302 → HTML).
A secondary issue: Spring Boot 4.0 no longer auto-enables Prometheus metrics export — `management.prometheus.metrics.export.enabled` must be set explicitly, and the Prometheus scrape endpoint requires `spring-boot-starter-micrometer-metrics` (a new starter that was split out in Spring Boot 4.0).
## Decision
1. **Dedicated management `SecurityFilterChain`** scoped to `/actuator/**` at `@Order(1)` (highest precedence). This chain:
- `permitAll()` for `/actuator/health` and `/actuator/prometheus` — required for Docker health checks and unauthenticated Prometheus scraping.
- `authenticated()` for all other actuator endpoints — blocks `/actuator/metrics`, `/actuator/info`, etc. without credentials.
- Uses an explicit `401` entry point (not form-login redirect) so that API clients — including Prometheus — receive a machine-readable status code rather than an HTML redirect.
- No CSRF, no form login.
2. **Belt-and-suspenders `permitAll()` in the main `SecurityFilterChain`** for `/actuator/health` and `/actuator/prometheus`, in case a future configuration change causes these paths to escape the management chain's `securityMatcher`.
3. **Network isolation as the outer defense boundary.** Port 8081 is not published in `docker-compose.yml` and is not routed through Caddy. Only services inside `archiv-net` (primarily Prometheus and the Docker health checker) can reach the management port.
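A minimal smoke test of this chain from any container on `archiv-net`; the service name `backend` matches `docker-compose.yml`, while the `ACTUATOR_*` credential variables are hypothetical names for illustration:
```bash
# Unauthenticated scrape endpoint: must return 200.
curl -s -o /dev/null -w '%{http_code}\n' http://backend:8081/actuator/prometheus

# Any other actuator endpoint without credentials: plain 401, no 302 to /login.
curl -s -o /dev/null -w '%{http_code}\n' http://backend:8081/actuator/metrics

# Same endpoint with HTTP Basic credentials (hypothetical variable names): 200.
curl -s -o /dev/null -w '%{http_code}\n' \
  -u "$ACTUATOR_USER:$ACTUATOR_PASS" http://backend:8081/actuator/metrics
```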
## Alternatives rejected
- **Exclude `ManagementWebSecurityAutoConfiguration`:** This auto-configuration no longer exists in Spring Boot 4.0. Exclusion is not applicable.
- **Keep `SecurityConfig` as the sole filter chain without an `@Order(1)` management chain:** The main chain's form-login `DelegatingAuthenticationEntryPoint` redirects unauthenticated clients to `/login` (302). Prometheus and automated health-check clients cannot authenticate through a login form, so the endpoint would be effectively unreachable without a dedicated chain that returns a plain 401 or 200.
- **Per-endpoint `@Order(1)` filter chain using `EndpointRequest.toAnyEndpoint()`:** The `spring-boot-security` artifact that provides `EndpointRequest` is not a transitive dependency of `spring-boot-starter-actuator` in Spring Boot 4.0. Using a path-based `securityMatcher("/actuator/**")` achieves the same scoping without an extra dependency.
## Consequences
- All actuator endpoints on port 8081 that are not explicitly `permitAll()`-ed require HTTP Basic credentials. Without valid credentials, the response is 401 (not a redirect).
- Adding a new actuator endpoint to `management.endpoints.web.exposure.include` implicitly protects it via `anyRequest().authenticated()` in the management chain — no additional `permitAll()` needed unless intentional.
- A regression test (`ActuatorPrometheusIT`) verifies:
- `/actuator/prometheus` returns 200 without credentials.
- `/actuator/metrics` returns 401 without credentials.
- Prometheus metric names are present in the response body.
- If port 8081 is ever accidentally published in `docker-compose.yml`, actuator endpoints other than health and prometheus are still protected by HTTP Basic. This reduces (but does not eliminate) the risk of inadvertent exposure.

View File

@@ -17,16 +17,16 @@ System_Boundary(archiv, "Familienarchiv (Docker Compose)") {
Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.") Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.")
} }
System_Boundary(observability, "Observability Stack (/opt/familienarchiv/docker-compose.observability.yml)") { System_Boundary(observability, "Observability Stack (docker-compose.observability.yml)") {
Container(prometheus, "Prometheus", "prom/prometheus:v3.4.0", "Scrapes metrics from backend management port 8081 (/actuator/prometheus), node-exporter, and cAdvisor. Retention: 30 days.") Container(prometheus, "Prometheus", "prom/prometheus:v3.4.0", "Scrapes metrics from backend management port 8081 (/actuator/prometheus), node-exporter, and cAdvisor. Retention: 30 days.")
Container(node_exporter, "Node Exporter", "prom/node-exporter:v1.9.0", "Host-level CPU, memory, disk, and network metrics.") Container(node_exporter, "Node Exporter", "prom/node-exporter:v1.9.0", "Host-level CPU, memory, disk, and network metrics.")
Container(cadvisor, "cAdvisor", "gcr.io/cadvisor/cadvisor:v0.52.1", "Per-container resource metrics.") Container(cadvisor, "cAdvisor", "gcr.io/cadvisor/cadvisor:v0.52.1", "Per-container resource metrics.")
Container(loki, "Loki", "grafana/loki:3.4.2", "Stores log streams from all containers.") Container(loki, "Loki", "grafana/loki:3.4.2", "Stores log streams from all containers.")
Container(promtail, "Promtail", "grafana/promtail:3.4.2", "Ships Docker container logs to Loki via Docker SD.") Container(promtail, "Promtail", "grafana/promtail:3.4.2", "Ships Docker container logs to Loki via Docker SD.")
Container(tempo, "Tempo", "grafana/tempo:2.7.2", "Distributed trace storage. OTLP HTTP receiver on port 4318 (archiv-net). Grafana queries traces on port 3200 (obs-net). All ports internal only.") Container(tempo, "Tempo", "grafana/tempo:2.7.2", "Distributed trace storage. OTLP gRPC receiver on port 4317 (archiv-net). Grafana queries traces on port 3200 (obs-net). All ports internal only.")
Container(grafana, "Grafana", "grafana/grafana-oss:11.6.1", "Unified observability UI — dashboards, logs, traces. Datasources (Prometheus, Loki, Tempo) and three dashboards are auto-provisioned.") Container(grafana, "Grafana", "grafana/grafana-oss:11.6.1", "Unified observability UI — dashboards, logs, traces. Datasources (Prometheus, Loki, Tempo) and three dashboards are auto-provisioned.")
Container(glitchtip, "GlitchTip", "glitchtip/glitchtip:6.1.6", "Sentry-compatible error tracker — web process. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces.") Container(glitchtip, "GlitchTip", "glitchtip/glitchtip:v4", "Sentry-compatible error tracker — web process. Receives frontend + backend error events, groups by fingerprint, provides issue UI with stack traces.")
Container(obs_glitchtip_worker, "GlitchTip Worker", "glitchtip/glitchtip:6.1.6", "Celery + beat worker — async event ingestion, notifications, cleanup.") Container(obs_glitchtip_worker, "GlitchTip Worker", "glitchtip/glitchtip:v4", "Celery + beat worker — async event ingestion, notifications, cleanup.")
Container(obs_redis, "Redis", "redis:7-alpine", "Celery task queue for GlitchTip async workers.") Container(obs_redis, "Redis", "redis:7-alpine", "Celery task queue for GlitchTip async workers.")
} }
@@ -42,7 +42,7 @@ Rel(backend, mail, "Sends notification and password-reset emails (optional)", "S
Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned") Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI") Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI")
Rel(promtail, loki, "Pushes log streams", "HTTP/Loki push API") Rel(promtail, loki, "Pushes log streams", "HTTP/Loki push API")
Rel(backend, tempo, "Sends distributed traces via OTLP", "HTTP / OTLP / port 4318 (archiv-net)") Rel(backend, tempo, "Sends distributed traces via OTLP", "gRPC / OTLP / port 4317 (archiv-net)")
Rel(grafana, prometheus, "Queries metrics", "HTTP 9090") Rel(grafana, prometheus, "Queries metrics", "HTTP 9090")
Rel(grafana, loki, "Queries logs", "HTTP 3100") Rel(grafana, loki, "Queries logs", "HTTP 3100")
Rel(grafana, tempo, "Queries traces", "HTTP 3200") Rel(grafana, tempo, "Queries traces", "HTTP 3200")

View File

@@ -19,39 +19,6 @@ Both containers live in the `gitea_gitea` Docker network on the VPS. The runner
The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps. The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps.
### Workspace bind-mount setup (DooD path resolution)
When a workflow step calls `docker compose up` with relative bind-mount sources (e.g. `./infra/observability/prometheus/prometheus.yml`), Compose resolves them against `$(pwd)` inside the job container and passes the resulting **absolute path** to the host Docker daemon. The host daemon then tries to bind-mount that path from the **host filesystem**.
In the default DooD setup the job container's workspace lives in the act_runner overlay2 layer — the host has no directory at that path, so the daemon auto-creates an empty one, and the container fails with:
```
error mounting "…/prometheus/prometheus.yml" to rootfs at "/etc/prometheus/prometheus.yml": not a directory
```
**Solution (ADR-015):** store job workspaces on a real host path and mount it at the **same absolute path** inside the runner and every job container. `runner-config.yaml` configures this via `workdir_parent`, `valid_volumes`, and `options`.
**One-time host setup** (required on any fresh VPS):
```bash
mkdir -p /srv/gitea-workspace
# Then add to the runner service in ~/docker/gitea/compose.yaml:
# volumes:
# - /srv/gitea-workspace:/srv/gitea-workspace
# Restart the runner container for the change to take effect.
```
The path `/srv/gitea-workspace` is the canonical workspace root. It must be identical on the host and inside job containers — if the paths differ, Compose still resolves to the container-internal path, which the host daemon cannot find (the original bug).
**Disk management:** act_runner cleans per-run subdirectories on completion. Orphaned directories from interrupted runs accumulate under `/srv/gitea-workspace` and should be pruned manually if disk space becomes a concern:
```bash
# List workspace directories older than 7 days
find /srv/gitea-workspace -mindepth 3 -maxdepth 3 -type d -mtime +7
```
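Once the listing has been verified, the same expression can drive the cleanup:
```bash
# Destructive: removes the directories matched above. Run the listing first
# and confirm nothing from an active run is included.
find /srv/gitea-workspace -mindepth 3 -maxdepth 3 -type d -mtime +7 -exec rm -rf {} +
```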
---
### Running host-level commands from CI (nsenter pattern) ### Running host-level commands from CI (nsenter pattern)
Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace: Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace:
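In sketch form, the elided invocation looks like this (flags reconstructed from the failure modes below and ADR-012; the workflow files are authoritative, and the image tag is illustrative):
```bash
# Privileged sibling container sharing the host PID namespace; nsenter then
# joins PID 1's mount/UTS/IPC/net namespaces so systemctl runs against the
# host systemd rather than inside the container.
docker run --rm --privileged --pid=host alpine:3.21 \
  nsenter -t 1 -m -u -i -n systemctl reload caddy
```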
@@ -141,33 +108,6 @@ nsenter: failed to execute /bin/systemctl: No such file or directory
The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step. The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step.
**Failure mode 4 — workspace bind-mount not configured (observability stack or any compose-with-file-mounts job)**
Symptom in CI log:
```
Error response from daemon: error while creating mount source path "…/prometheus/prometheus.yml": mkdir …: not a directory
```
Or the service starts but immediately crashes because a config file was mounted as an empty directory.
Cause: `/srv/gitea-workspace` does not exist on the host, or the runner container's `compose.yaml` is missing the `- /srv/gitea-workspace:/srv/gitea-workspace` volume line.
Diagnosis:
```bash
ssh root@<vps>
ls -la /srv/gitea-workspace # must exist and be a directory
docker inspect gitea-runner | grep -A5 Mounts # must show /srv/gitea-workspace
```
Recovery:
```bash
mkdir -p /srv/gitea-workspace
# Add volume line to runner compose.yaml, then:
docker compose -f ~/docker/gitea/compose.yaml up -d gitea-runner
```
See `docs/DEPLOYMENT.md §3.1` and ADR-015 for the full setup rationale.
--- ---
## Gitea vs GitHub Actions Differences ## Gitea vs GitHub Actions Differences

View File

@@ -12,11 +12,11 @@ The original spec in this doc proposed an overlay pattern (`docker compose -f do
--- ---
## Observability stack ## Observability stack — not yet deployed
The observability stack (Prometheus, Loki, Grafana, Tempo, GlitchTip) ships as a separate `docker-compose.observability.yml` alongside the main stack. Configuration lives under `infra/observability/`. Prometheus, Loki, Grafana, Alertmanager, Uptime Kuma, GlitchTip and ntfy are **not** part of the production deployment that #497 landed. They are tracked as follow-up issue #498.
→ See [docs/DEPLOYMENT.md §4](../DEPLOYMENT.md#4-logs--observability) for the full setup procedure, service URLs, first-run steps, and env var reference. When that lands the observability containers will join `docker-compose.prod.yml` under a dedicated profile so they can be operated alongside the application stack without affecting the application containers' restart cycle.
--- ---

View File

@@ -45,6 +45,7 @@ export default defineConfig(
files: ['**/*.svelte', '**/*.svelte.ts', '**/*.svelte.js'], files: ['**/*.svelte', '**/*.svelte.ts', '**/*.svelte.js'],
languageOptions: { languageOptions: {
parserOptions: { parserOptions: {
projectService: true,
extraFileExtensions: ['.svelte'], extraFileExtensions: ['.svelte'],
parser: ts.parser, parser: ts.parser,
svelteConfig svelteConfig

View File

@@ -1,24 +0,0 @@
# Non-secret observability stack configuration — tracked in git.
# Secret values (passwords, keys) are injected by CI from Gitea secrets
# into /opt/familienarchiv/obs-secrets.env at deploy time.
#
# For local dev the main .env file supplies these values instead;
# this file is only used in the CI/production path.
# Host ports (all bound to 127.0.0.1 — Caddy is the external entry point)
PORT_GRAFANA=3003
PORT_GLITCHTIP=3002
PORT_PROMETHEUS=9090
# Public URLs — used for internal redirects, alert email links, OAuth callbacks
GF_SERVER_ROOT_URL=https://grafana.archiv.raddatz.cloud
GLITCHTIP_DOMAIN=https://glitchtip.archiv.raddatz.cloud
POSTGRES_USER=archiv
# PostgreSQL hostname for GlitchTip db-init and workers.
# The actual value depends on the Compose project name — it is not a fixed string.
# CI sets POSTGRES_HOST in obs-secrets.env per environment:
# staging: archiv-staging-db-1 (project archiv-staging + service db)
# production: archiv-production-db-1 (project archiv-production + service db)
# For local dev, set POSTGRES_HOST in your .env file (defaults to archive-db there).

View File

@@ -15,6 +15,8 @@ scrape_configs:
metrics_path: /actuator/prometheus metrics_path: /actuator/prometheus
static_configs: static_configs:
# Uses the Docker service name (not container_name) for reliable DNS resolution. # Uses the Docker service name (not container_name) for reliable DNS resolution.
# Target will show as DOWN until backend instrumentation issue adds
# micrometer-registry-prometheus and exposes the endpoint — this is expected.
- targets: ['backend:8081'] - targets: ['backend:8081']
- job_name: ocr-service - job_name: ocr-service

View File

@@ -28,5 +28,3 @@ scrape_configs:
target_label: 'compose_project' target_label: 'compose_project'
- source_labels: ['__meta_docker_container_log_stream'] - source_labels: ['__meta_docker_container_log_stream']
target_label: 'logstream' target_label: 'logstream'
- source_labels: ['__meta_docker_container_label_com_docker_compose_service']
target_label: 'job'

View File

@@ -36,6 +36,9 @@ metrics_generator:
source: tempo source: tempo
storage: storage:
path: /var/tempo/generator/wal path: /var/tempo/generator/wal
processors:
- service-graphs
- span-metrics
# Tempo HTTP API (port 3200) is unauthenticated. Access is controlled entirely # Tempo HTTP API (port 3200) is unauthenticated. Access is controlled entirely
# by network isolation: only Grafana (on obs-net) should reach this port. # by network isolation: only Grafana (on obs-net) should reach this port.

View File

@@ -1,26 +1,16 @@
# runner-config.yaml — only the relevant section # runner-config.yaml — only the relevant section
container: container:
# join the same network Gitea is on, so job containers can resolve 'gitea' # passed as DOCKER_HOST inside the job container
# for actions/checkout and other internal API calls.
network: gitea_gitea
# passed as DOCKER_HOST inside the job container; act_runner auto-mounts
# this socket path into the job, so no explicit -v option is needed.
docker_host: "unix:///var/run/docker.sock" docker_host: "unix:///var/run/docker.sock"
# Job workspaces are stored here and mounted at the same absolute path # whitelists the socket path so workflows can mount it
# inside job containers. Identical host <-> container path is the requirement:
# Compose resolves relative bind mounts to $(pwd) inside the job container
# and passes that absolute path to the host daemon, which must find the file
# at that exact host path. Prerequisite: /srv/gitea-workspace exists on the
# host and is bind-mounted in the runner container (see compose.yaml).
workdir_parent: /srv/gitea-workspace
# whitelists volumes that workflow steps may bind-mount
valid_volumes: valid_volumes:
- "/var/run/docker.sock" - "/var/run/docker.sock"
- "/srv/gitea-workspace" # appended to `docker run` when the runner spawns a job container
- "/opt/familienarchiv" # SECURITY: Mounting the Docker socket grants job containers root-equivalent
# mount the workspace and the permanent obs/config directory into job containers. # access to the host Docker daemon. Acceptable here because only trusted code
# /opt/familienarchiv is the stable path CI copies configs to (ADR-016); it must # from this private repo runs on this runner. Do NOT use on a runner that
# be mounted here so deploy steps can write through to the host filesystem. # accepts untrusted PRs from external contributors.
options: "-v /srv/gitea-workspace:/srv/gitea-workspace -v /opt/familienarchiv:/opt/familienarchiv" options: "-v /var/run/docker.sock:/var/run/docker.sock"
# keep behavior default — Testcontainers handles its own networking # keep network mode default (bridge) — Testcontainers handles its own networking
force_pull: false force_pull: false