Compare commits
17 Commits
fix/issue-
...
08c7dbcaa2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
08c7dbcaa2 | ||
|
|
a3f1260c23 | ||
|
|
e75ddc318b | ||
|
|
60cb9d5ad0 | ||
|
|
1ce0638ae6 | ||
|
|
f608838f7a | ||
|
|
52a96f657d | ||
|
|
f87504fb23 | ||
|
|
99de6f1d07 | ||
|
|
432ae2ac83 | ||
|
|
5f3529439a | ||
|
|
48c8bb8a5f | ||
|
|
023810df1e | ||
|
|
ad3b571bba | ||
|
|
9686e304c2 | ||
|
|
ea0b3050e4 | ||
|
|
21343cdf23 |
@@ -97,6 +97,39 @@ jobs:
|
||||
--profile staging \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Reload Caddy
|
||||
# Apply any committed Caddyfile changes before smoke-testing the
|
||||
# public surface. Without this step, a Caddyfile edit lands in the
|
||||
# repo but Caddy keeps serving the previous config until someone
|
||||
# reloads it manually — the smoke test would then catch a stale
|
||||
# header or a still-proxied /actuator route rather than confirming
|
||||
# the current config is live.
|
||||
#
|
||||
# The runner executes job steps inside Docker containers (DooD).
|
||||
# `systemctl` is not present in container images and cannot reach
|
||||
# the host's systemd directly. We use the Docker socket (mounted
|
||||
# into every job container via runner-config.yaml) to spin up a
|
||||
# privileged sibling container in the host PID namespace; nsenter
|
||||
# then enters the host's namespaces so systemctl talks to the real
|
||||
# host systemd daemon. No sudoers entry is required — the Docker
|
||||
# socket already grants root-equivalent host access.
|
||||
#
|
||||
# Alpine is used: ~5 MB vs ~70 MB for ubuntu, no unnecessary
|
||||
# tooling, and the digest is pinned so any upstream change requires
|
||||
# an explicit bump PR. util-linux (which ships nsenter) is installed
|
||||
# at run time; apk add takes ~1 s on the warm VPS cache.
|
||||
#
|
||||
# `reload` not `restart`: reload sends SIGHUP so Caddy re-reads its
|
||||
# config in-process without dropping TLS connections. `restart`
|
||||
# would briefly stop the service, losing in-flight requests.
|
||||
#
|
||||
# If Caddy is not running this step fails fast before the smoke test
|
||||
# issues a misleading "port 443 refused" error.
|
||||
run: |
|
||||
docker run --rm --privileged --pid=host \
|
||||
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
|
||||
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
|
||||
|
||||
- name: Smoke test deployed environment
|
||||
# Healthchecks confirm containers are healthy; they do NOT confirm the
|
||||
# public surface works. This step catches: Caddy not reloaded, HSTS
|
||||
|
||||
@@ -92,6 +92,18 @@ jobs:
|
||||
--env-file .env.production \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Reload Caddy
|
||||
# See nightly.yml — same rationale and mechanism: DooD job containers
|
||||
# cannot call systemctl directly; nsenter via a privileged sibling
|
||||
# container reaches the host systemd. Must run after deploy (so the
|
||||
# latest Caddyfile is on disk) and before the smoke test (so the
|
||||
# public surface reflects the current config). Alpine with pinned
|
||||
# digest; reload not restart — see nightly.yml for full rationale.
|
||||
run: |
|
||||
docker run --rm --privileged --pid=host \
|
||||
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
|
||||
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
|
||||
|
||||
- name: Smoke test deployed environment
|
||||
# See nightly.yml — same three checks, against the prod vhost.
|
||||
# --resolve pins archiv.raddatz.cloud to the runner's loopback so
|
||||
|
||||
@@ -0,0 +1,137 @@
|
||||
package org.raddatz.familienarchiv.security;
|
||||
|
||||
import jakarta.servlet.FilterChain;
|
||||
import jakarta.servlet.ServletException;
|
||||
import jakarta.servlet.http.Cookie;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletRequestWrapper;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import org.springframework.core.annotation.Order;
|
||||
import org.springframework.http.HttpHeaders;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.web.filter.OncePerRequestFilter;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URLDecoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
|
||||
/**
|
||||
* Promotes the {@code auth_token} cookie to an {@code Authorization} header
|
||||
* so that browser-side requests to {@code /api/*} authenticate the same way
|
||||
* SSR fetches do.
|
||||
*
|
||||
* <p>The SvelteKit login action stores the full HTTP Basic header value
|
||||
* ({@code "Basic <base64>"}) in an HttpOnly cookie. SSR fetches from
|
||||
* {@code hooks.server.ts} read the cookie and pass it explicitly as the
|
||||
* {@code Authorization} header. In the dev environment, Vite's proxy does
|
||||
* the same on every {@code /api/*} request (see {@code vite.config.ts}).
|
||||
* In production, Caddy proxies {@code /api/*} straight to the backend and
|
||||
* does NOT translate the cookie — so client-side {@code fetch} and
|
||||
* {@code EventSource} calls reach the backend without auth, get
|
||||
* {@code 401 WWW-Authenticate: Basic}, and the browser pops a native dialog.
|
||||
*
|
||||
* <p>This filter closes that gap: if a request has an {@code auth_token}
|
||||
* cookie but no explicit {@code Authorization} header, promote the cookie
|
||||
* value (URL-decoded) into the header before Spring Security inspects it.
|
||||
* Explicit {@code Authorization} headers are preserved unchanged.
|
||||
*
|
||||
* <p>See #520. Filter runs at {@code Ordered.HIGHEST_PRECEDENCE} so it
|
||||
* mutates the request before any Spring Security filter sees it.
|
||||
*
|
||||
* <p><b>Scope:</b> only {@code /api/*} requests are touched. The
|
||||
* {@code /actuator/*} block in Caddy plus the open auth/reset paths in
|
||||
* {@link SecurityConfig} must NOT receive a promoted Authorization.
|
||||
*
|
||||
* <p><b>⚠ Log-leakage warning:</b> the wrapped request exposes the
|
||||
* Authorization header via {@code getHeaderNames}/{@code getHeaders}. Any
|
||||
* filter or interceptor that iterates request headers will see the live
|
||||
* Basic credential. Do NOT add a request-header logger downstream of this
|
||||
* filter without explicitly scrubbing the {@code Authorization} field.
|
||||
*/
|
||||
@Component
|
||||
@Order(org.springframework.core.Ordered.HIGHEST_PRECEDENCE)
|
||||
public class AuthTokenCookieFilter extends OncePerRequestFilter {
|
||||
|
||||
static final String COOKIE_NAME = "auth_token";
|
||||
static final String SCOPE_PREFIX = "/api/";
|
||||
|
||||
@Override
|
||||
protected void doFilterInternal(HttpServletRequest request,
|
||||
HttpServletResponse response,
|
||||
FilterChain chain) throws ServletException, IOException {
|
||||
// Scope: only /api/* needs cookie promotion. /actuator/health (open),
|
||||
// /api/auth/forgot-password (open), /login etc. don't.
|
||||
if (!request.getRequestURI().startsWith(SCOPE_PREFIX)) {
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
// An explicit Authorization header wins — this is the SSR fetch path
|
||||
// (hooks.server.ts builds the header itself).
|
||||
if (request.getHeader(HttpHeaders.AUTHORIZATION) != null) {
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
Cookie[] cookies = request.getCookies();
|
||||
if (cookies == null) {
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
for (Cookie c : cookies) {
|
||||
if (COOKIE_NAME.equals(c.getName()) && c.getValue() != null && !c.getValue().isBlank()) {
|
||||
String decoded;
|
||||
try {
|
||||
decoded = URLDecoder.decode(c.getValue(), StandardCharsets.UTF_8);
|
||||
} catch (IllegalArgumentException malformed) {
|
||||
// Malformed percent-encoding — refuse to forward a bogus
|
||||
// Authorization header. Spring Security will treat the
|
||||
// request as unauthenticated.
|
||||
chain.doFilter(request, response);
|
||||
return;
|
||||
}
|
||||
chain.doFilter(new AuthHeaderRequest(request, decoded), response);
|
||||
return;
|
||||
}
|
||||
}
|
||||
chain.doFilter(request, response);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds (or overrides) the {@code Authorization} header on a wrapped request.
|
||||
* All other headers pass through unchanged.
|
||||
*/
|
||||
static final class AuthHeaderRequest extends HttpServletRequestWrapper {
|
||||
private final String authorization;
|
||||
|
||||
AuthHeaderRequest(HttpServletRequest request, String authorization) {
|
||||
super(request);
|
||||
this.authorization = authorization;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHeader(String name) {
|
||||
if (HttpHeaders.AUTHORIZATION.equalsIgnoreCase(name)) {
|
||||
return authorization;
|
||||
}
|
||||
return super.getHeader(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration<String> getHeaders(String name) {
|
||||
if (HttpHeaders.AUTHORIZATION.equalsIgnoreCase(name)) {
|
||||
return Collections.enumeration(Collections.singletonList(authorization));
|
||||
}
|
||||
return super.getHeaders(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration<String> getHeaderNames() {
|
||||
Enumeration<String> base = super.getHeaderNames();
|
||||
java.util.Set<String> names = new java.util.LinkedHashSet<>();
|
||||
while (base.hasMoreElements()) names.add(base.nextElement());
|
||||
names.add(HttpHeaders.AUTHORIZATION);
|
||||
return Collections.enumeration(names);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -37,12 +37,20 @@ public class SecurityConfig {
|
||||
@Bean
|
||||
public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
|
||||
http
|
||||
// CSRF is intentionally disabled: every request from the SvelteKit frontend
|
||||
// carries an explicit Authorization header (Basic Auth token injected by
|
||||
// hooks.server.ts). Browsers block cross-origin requests from setting custom
|
||||
// headers, so cross-site request forgery via a third-party page is not
|
||||
// possible with this auth scheme. If the auth model ever changes to
|
||||
// cookie-based sessions, CSRF protection must be re-enabled.
|
||||
// CSRF is intentionally disabled. With the cookie-promotion model
|
||||
// (auth_token cookie → Authorization header via AuthTokenCookieFilter,
|
||||
// see #520), every authenticated request to /api/* now carries the
|
||||
// credential automatically once the cookie is set. The CSRF defence
|
||||
// for state-changing endpoints is therefore LOAD-BEARING on:
|
||||
//
|
||||
// 1. SameSite=strict on the auth_token cookie (login/+page.server.ts).
|
||||
// A cross-site POST from evil.com cannot include the cookie.
|
||||
// 2. CORS — Spring's default rejects cross-origin requests with
|
||||
// credentials unless explicitly allowed (no allowedOrigins config).
|
||||
//
|
||||
// If either of those is ever weakened (e.g. cookie flipped to
|
||||
// SameSite=lax, CORS allowedOrigins expanded), CSRF protection
|
||||
// MUST be re-enabled here.
|
||||
.csrf(csrf -> csrf.disable())
|
||||
|
||||
.authorizeHttpRequests(auth -> {
|
||||
|
||||
@@ -20,6 +20,7 @@ import org.springframework.boot.CommandLineRunner;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Profile;
|
||||
import org.springframework.core.env.Environment;
|
||||
import org.springframework.security.crypto.password.PasswordEncoder;
|
||||
|
||||
import java.time.LocalDate;
|
||||
@@ -31,26 +32,51 @@ import java.util.Set;
|
||||
@DependsOn("flyway")
|
||||
public class UserDataInitializer {
|
||||
|
||||
@Value("${app.admin.email:admin@familyarchive.local}")
|
||||
static final String DEFAULT_ADMIN_EMAIL = "admin@familienarchiv.local";
|
||||
static final String DEFAULT_ADMIN_PASSWORD = "admin123";
|
||||
|
||||
@Value("${app.admin.email:" + DEFAULT_ADMIN_EMAIL + "}")
|
||||
private String adminEmail;
|
||||
|
||||
@Value("${app.admin.password:admin123}")
|
||||
@Value("${app.admin.password:" + DEFAULT_ADMIN_PASSWORD + "}")
|
||||
private String adminPassword;
|
||||
|
||||
private final AppUserRepository userRepository;
|
||||
private final UserGroupRepository groupRepository;
|
||||
private final Environment environment;
|
||||
|
||||
@Bean
|
||||
public CommandLineRunner initAdminUser(PasswordEncoder passwordEncoder) {
|
||||
return args -> {
|
||||
if (userRepository.findByEmail(adminEmail).isEmpty()) {
|
||||
// Fail-closed in production: refuse to seed with the well-known
|
||||
// defaults. Otherwise an operator who forgets APP_ADMIN_USERNAME
|
||||
// / APP_ADMIN_PASSWORD locks production to admin@…/admin123 PERMANENTLY
|
||||
// (UserDataInitializer only seeds when the row is missing — see #513).
|
||||
// Allowed in dev/test/e2e because those run without secrets configured.
|
||||
boolean isLocalProfile = environment.matchesProfiles("dev", "test", "e2e");
|
||||
if (!isLocalProfile
|
||||
&& (DEFAULT_ADMIN_EMAIL.equals(adminEmail)
|
||||
|| DEFAULT_ADMIN_PASSWORD.equals(adminPassword))) {
|
||||
throw new IllegalStateException(
|
||||
"Refusing to seed admin user with default credentials outside "
|
||||
+ "the dev/test/e2e profiles. Set APP_ADMIN_USERNAME and "
|
||||
+ "APP_ADMIN_PASSWORD to non-default values before first boot — "
|
||||
+ "this lock-in is permanent."
|
||||
);
|
||||
}
|
||||
log.info("Kein Admin-User '{}' gefunden. Erstelle Default-Admin...", adminEmail);
|
||||
|
||||
UserGroup adminGroup = UserGroup.builder()
|
||||
.name("Administrators")
|
||||
.permissions(Set.of("ADMIN", "READ_ALL", "WRITE_ALL", "ANNOTATE_ALL", "ADMIN_USER", "ADMIN_TAG", "ADMIN_PERMISSION"))
|
||||
.build();
|
||||
groupRepository.save(adminGroup);
|
||||
// Reuse the Administrators group if it already exists (e.g. a
|
||||
// previous boot seeded the group but failed before creating
|
||||
// the admin user, or the operator deleted just the user row
|
||||
// to retry the seed with a new email). Blind-INSERTing would
|
||||
// violate user_groups_name_key and abort the context. See #518.
|
||||
UserGroup adminGroup = groupRepository.findByName("Administrators")
|
||||
.orElseGet(() -> groupRepository.save(UserGroup.builder()
|
||||
.name("Administrators")
|
||||
.permissions(Set.of("ADMIN", "READ_ALL", "WRITE_ALL", "ANNOTATE_ALL", "ADMIN_USER", "ADMIN_TAG", "ADMIN_PERMISSION"))
|
||||
.build()));
|
||||
|
||||
AppUser admin = AppUser.builder()
|
||||
.email(adminEmail)
|
||||
|
||||
@@ -69,7 +69,11 @@ app:
|
||||
from: ${APP_MAIL_FROM:noreply@familienarchiv.local}
|
||||
|
||||
admin:
|
||||
username: ${APP_ADMIN_USERNAME:admin}
|
||||
# Key must be `email`, not `username` — UserDataInitializer reads
|
||||
# `${app.admin.email:...}`. The env-var name stays APP_ADMIN_USERNAME
|
||||
# to match the existing Gitea secrets and DEPLOYMENT.md §3.3.
|
||||
# See #513.
|
||||
email: ${APP_ADMIN_USERNAME:admin@familienarchiv.local}
|
||||
password: ${APP_ADMIN_PASSWORD:admin123}
|
||||
|
||||
import:
|
||||
|
||||
@@ -10,6 +10,7 @@ import org.raddatz.familienarchiv.document.DocumentStatus;
|
||||
import org.raddatz.familienarchiv.document.DocumentRepository;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.test.context.DynamicPropertyRegistry;
|
||||
import org.springframework.test.context.DynamicPropertySource;
|
||||
@@ -41,6 +42,7 @@ import static org.assertj.core.api.Assertions.assertThat;
|
||||
* test pyramid mocks at the FileService boundary.
|
||||
*/
|
||||
@SpringBootTest
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
class ThumbnailServiceIntegrationTest {
|
||||
|
||||
|
||||
@@ -0,0 +1,134 @@
|
||||
package org.raddatz.familienarchiv.security;
|
||||
|
||||
import jakarta.servlet.FilterChain;
|
||||
import jakarta.servlet.http.Cookie;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.springframework.mock.web.MockHttpServletRequest;
|
||||
import org.springframework.mock.web.MockHttpServletResponse;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
/**
|
||||
* The filter must turn a browser-side {@code Cookie: auth_token=Basic%20<base64>}
|
||||
* into {@code Authorization: Basic <base64>} (URL-decoded) so that Spring's
|
||||
* Basic-auth filter accepts it. Skips when the request already has an explicit
|
||||
* {@code Authorization} header, or when no {@code auth_token} cookie is present.
|
||||
*
|
||||
* <p>See #520.
|
||||
*/
|
||||
class AuthTokenCookieFilterTest {
|
||||
|
||||
private final AuthTokenCookieFilter filter = new AuthTokenCookieFilter();
|
||||
|
||||
@Test
|
||||
void promotes_url_encoded_auth_token_cookie_to_decoded_Authorization_header() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.setCookies(new Cookie("auth_token", "Basic%20YWRtaW5AZmFtaWx5YXJjaGl2ZS5sb2NhbDpzZWNyZXQ%3D"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
ArgumentCaptor<HttpServletRequest> captor = ArgumentCaptor.forClass(HttpServletRequest.class);
|
||||
verify(chain, times(1)).doFilter(captor.capture(), org.mockito.ArgumentMatchers.any(HttpServletResponse.class));
|
||||
|
||||
HttpServletRequest forwarded = captor.getValue();
|
||||
assertThat(forwarded.getHeader("Authorization"))
|
||||
.as("Authorization must be URL-decoded so Spring's Basic parser sees a literal space")
|
||||
.isEqualTo("Basic YWRtaW5AZmFtaWx5YXJjaGl2ZS5sb2NhbDpzZWNyZXQ=");
|
||||
}
|
||||
|
||||
@Test
|
||||
void preserves_explicit_Authorization_header_and_ignores_cookie() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.addHeader("Authorization", "Basic explicit-header-wins");
|
||||
req.setCookies(new Cookie("auth_token", "Basic%20cookie-would-have-promoted"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
// Forwards the original request unchanged — same instance, no wrapping.
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_when_no_cookies_at_all() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_when_auth_token_cookie_is_absent() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.setCookies(new Cookie("some_other_cookie", "value"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_when_auth_token_cookie_is_empty() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
req.setCookies(new Cookie("auth_token", ""));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_unchanged_when_request_is_outside_api_scope() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
// /actuator/health and similar must NOT receive a promoted Authorization
|
||||
// header — they have their own access rules and should never be authed
|
||||
// via the cookie.
|
||||
req.setRequestURI("/actuator/health");
|
||||
req.setCookies(new Cookie("auth_token", "Basic%20YWR=="));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
// Forwards the original request unchanged — same instance, no wrapping.
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
|
||||
@Test
|
||||
void passes_through_unchanged_when_cookie_value_is_malformed_percent_encoding() throws Exception {
|
||||
MockHttpServletRequest req = new MockHttpServletRequest();
|
||||
req.setRequestURI("/api/users/me");
|
||||
// Lone "%" without two hex digits → URLDecoder throws → filter must
|
||||
// refuse to forward a bogus Authorization header.
|
||||
req.setCookies(new Cookie("auth_token", "Basic%2"));
|
||||
MockHttpServletResponse res = new MockHttpServletResponse();
|
||||
FilterChain chain = mock(FilterChain.class);
|
||||
|
||||
filter.doFilter(req, res, chain);
|
||||
|
||||
// Forwards the original request unchanged — Spring Security treats it
|
||||
// as unauthenticated rather than crashing on bad input.
|
||||
verify(chain).doFilter(req, res);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,174 @@
|
||||
package org.raddatz.familienarchiv.user;
|
||||
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.springframework.boot.CommandLineRunner;
|
||||
import org.springframework.core.env.Environment;
|
||||
import org.springframework.security.crypto.password.PasswordEncoder;
|
||||
import org.springframework.test.util.ReflectionTestUtils;
|
||||
|
||||
import java.util.Optional;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
/**
|
||||
* UserDataInitializer must refuse to seed the admin user with the hardcoded
|
||||
* dev defaults when running outside the {@code dev} profile.
|
||||
*
|
||||
* <p>Why this matters: per DEPLOYMENT.md §3.5 and ADR-011, the admin password
|
||||
* is permanently locked on first deploy (UserDataInitializer only seeds when
|
||||
* the row is missing). If an operator forgets to set {@code APP_ADMIN_USERNAME}
|
||||
* / {@code APP_ADMIN_PASSWORD}, prod silently boots with the well-known dev
|
||||
* defaults — a credential-disclosure foot-gun, not a config typo. See #513.
|
||||
*/
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
class AdminSeedFailClosedTest {
|
||||
|
||||
@Mock AppUserRepository userRepository;
|
||||
@Mock UserGroupRepository groupRepository;
|
||||
@Mock Environment environment;
|
||||
@Mock PasswordEncoder passwordEncoder;
|
||||
|
||||
UserDataInitializer initializer;
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
initializer = new UserDataInitializer(userRepository, groupRepository, environment);
|
||||
}
|
||||
|
||||
@Test
|
||||
void refuses_to_seed_when_email_is_default_and_profile_is_not_dev() throws Exception {
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
|
||||
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", UserDataInitializer.DEFAULT_ADMIN_EMAIL);
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", "operator-set-this-one");
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
|
||||
assertThatThrownBy(() -> runner.run())
|
||||
.isInstanceOf(IllegalStateException.class)
|
||||
.hasMessageContaining("default credentials")
|
||||
.hasMessageContaining("permanent");
|
||||
|
||||
verify(userRepository, never()).save(org.mockito.ArgumentMatchers.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
void refuses_to_seed_when_password_is_default_and_profile_is_not_dev() throws Exception {
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
|
||||
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", UserDataInitializer.DEFAULT_ADMIN_PASSWORD);
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
|
||||
assertThatThrownBy(() -> runner.run())
|
||||
.isInstanceOf(IllegalStateException.class)
|
||||
.hasMessageContaining("default credentials");
|
||||
}
|
||||
|
||||
@Test
|
||||
void allows_seed_when_both_values_are_set_and_profile_is_not_dev() throws Exception {
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
|
||||
when(groupRepository.findByName("Administrators")).thenReturn(Optional.empty());
|
||||
when(groupRepository.save(any(UserGroup.class))).thenAnswer(inv -> inv.getArgument(0));
|
||||
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
|
||||
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", "a-real-strong-password");
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
runner.run();
|
||||
|
||||
verify(userRepository).save(any(AppUser.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
void allows_seed_with_defaults_when_profile_is_dev() throws Exception {
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
|
||||
when(groupRepository.findByName("Administrators")).thenReturn(Optional.empty());
|
||||
when(groupRepository.save(any(UserGroup.class))).thenAnswer(inv -> inv.getArgument(0));
|
||||
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(true);
|
||||
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", UserDataInitializer.DEFAULT_ADMIN_EMAIL);
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", UserDataInitializer.DEFAULT_ADMIN_PASSWORD);
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
runner.run();
|
||||
|
||||
verify(userRepository).save(any(AppUser.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
void does_not_check_defaults_when_admin_already_exists() throws Exception {
|
||||
AppUser existing = AppUser.builder()
|
||||
.email("someone@example.com")
|
||||
.password("$2a$10$stub")
|
||||
.build();
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.of(existing));
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", UserDataInitializer.DEFAULT_ADMIN_EMAIL);
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", UserDataInitializer.DEFAULT_ADMIN_PASSWORD);
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
runner.run();
|
||||
|
||||
verify(userRepository, never()).save(org.mockito.ArgumentMatchers.any());
|
||||
// Importantly, no IllegalStateException — re-deploys must not panic over
|
||||
// historical default-seeded data they cannot retroactively fix.
|
||||
}
|
||||
|
||||
@Test
|
||||
void reuses_existing_Administrators_group_when_seeding_a_new_admin() throws Exception {
|
||||
// Setup: admin user does not exist, but the Administrators group does
|
||||
// (e.g. previous boot seeded the group then failed; operator deleted
|
||||
// the bad user row to retry with a corrected APP_ADMIN_USERNAME). The
|
||||
// re-seed must reuse the group, not blind-INSERT a duplicate. See #518.
|
||||
UserGroup existingGroup = UserGroup.builder()
|
||||
.name("Administrators")
|
||||
.build();
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
|
||||
when(groupRepository.findByName("Administrators")).thenReturn(Optional.of(existingGroup));
|
||||
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
|
||||
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", "a-real-strong-password");
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
runner.run();
|
||||
|
||||
// Group must not be re-inserted — that would violate user_groups_name_key.
|
||||
verify(groupRepository, never()).save(any(UserGroup.class));
|
||||
// But the admin user IS created, with the existing group attached.
|
||||
org.mockito.ArgumentCaptor<AppUser> captor = org.mockito.ArgumentCaptor.forClass(AppUser.class);
|
||||
verify(userRepository).save(captor.capture());
|
||||
assertThat(captor.getValue().getGroups()).containsExactly(existingGroup);
|
||||
}
|
||||
|
||||
@Test
|
||||
void creates_Administrators_group_when_seeding_admin_on_a_fresh_database() throws Exception {
|
||||
when(userRepository.findByEmail(anyString())).thenReturn(Optional.empty());
|
||||
when(groupRepository.findByName("Administrators")).thenReturn(Optional.empty());
|
||||
when(groupRepository.save(any(UserGroup.class))).thenAnswer(inv -> inv.getArgument(0));
|
||||
when(environment.matchesProfiles("dev", "test", "e2e")).thenReturn(false);
|
||||
when(passwordEncoder.encode(anyString())).thenReturn("$2a$10$stub");
|
||||
ReflectionTestUtils.setField(initializer, "adminEmail", "admin@archiv.raddatz.cloud");
|
||||
ReflectionTestUtils.setField(initializer, "adminPassword", "a-real-strong-password");
|
||||
|
||||
CommandLineRunner runner = initializer.initAdminUser(passwordEncoder);
|
||||
runner.run();
|
||||
|
||||
// Group should be inserted exactly once.
|
||||
verify(groupRepository).save(any(UserGroup.class));
|
||||
verify(userRepository).save(any(AppUser.class));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,95 @@
|
||||
package org.raddatz.familienarchiv.user;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.beans.factory.config.YamlPropertiesFactoryBean;
|
||||
import org.springframework.boot.context.properties.bind.Binder;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
|
||||
import org.springframework.core.env.PropertiesPropertySource;
|
||||
import org.springframework.core.io.ClassPathResource;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.Properties;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* Pins the admin-seed property key contract. {@code UserDataInitializer} reads
|
||||
* {@code @Value("${app.admin.email:...}")} and {@code @Value("${app.admin.password:...}")}.
|
||||
* The yaml MUST expose those exact keys, not e.g. {@code app.admin.username}, or
|
||||
* the env vars {@code APP_ADMIN_USERNAME} / {@code APP_ADMIN_PASSWORD} are
|
||||
* silently ignored and the admin user gets seeded with the hardcoded defaults.
|
||||
*
|
||||
* <p>Discovered as a HIGH bug during the production-deploy bootstrap (#513): on
|
||||
* first deploy the prod admin password is permanently locked to whatever ends
|
||||
* up in the database, so a key-name mismatch would lock prod to the dev defaults
|
||||
* {@code admin@familyarchive.local} / {@code admin123}.
|
||||
*
|
||||
* <p>No Spring context — Binder reads application.yaml directly.
|
||||
*/
|
||||
class AdminSeedPropertyKeyTest {
|
||||
|
||||
@Test
|
||||
void admin_email_key_binds_from_yaml() {
|
||||
Binder binder = binderFromApplicationYaml();
|
||||
|
||||
String email = binder.bind("app.admin.email", String.class)
|
||||
.orElseThrow(() -> new AssertionError(
|
||||
"app.admin.email is missing from application.yaml. "
|
||||
+ "UserDataInitializer reads this exact key; if the yaml uses "
|
||||
+ "a different name (e.g. 'username'), the env var "
|
||||
+ "APP_ADMIN_USERNAME is silently ignored."));
|
||||
|
||||
assertThat(email)
|
||||
.as("app.admin.email must resolve from APP_ADMIN_USERNAME or its default")
|
||||
.isNotBlank();
|
||||
}
|
||||
|
||||
@Test
|
||||
void admin_password_key_binds_from_yaml() {
|
||||
Binder binder = binderFromApplicationYaml();
|
||||
|
||||
String password = binder.bind("app.admin.password", String.class)
|
||||
.orElseThrow(() -> new AssertionError(
|
||||
"app.admin.password is missing from application.yaml. "
|
||||
+ "UserDataInitializer reads this exact key."));
|
||||
|
||||
assertThat(password)
|
||||
.as("app.admin.password must resolve from APP_ADMIN_PASSWORD or its default")
|
||||
.isNotBlank();
|
||||
}
|
||||
|
||||
@Test
|
||||
void userDataInitializer_reads_app_admin_email_not_username() throws NoSuchFieldException {
|
||||
// Pin the Java side too: a future rename of the @Value placeholder
|
||||
// (e.g. back to `${app.admin.username:...}`) would silently break the
|
||||
// binding while the yaml-side assertions above still pass. See #513.
|
||||
Field field = UserDataInitializer.class.getDeclaredField("adminEmail");
|
||||
Value annotation = field.getAnnotation(Value.class);
|
||||
assertThat(annotation)
|
||||
.as("UserDataInitializer.adminEmail must be @Value-annotated")
|
||||
.isNotNull();
|
||||
assertThat(annotation.value())
|
||||
.as("UserDataInitializer must read app.admin.email — not username or any other key")
|
||||
.startsWith("${app.admin.email:");
|
||||
}
|
||||
|
||||
@Test
|
||||
void userDataInitializer_reads_app_admin_password() throws NoSuchFieldException {
|
||||
Field field = UserDataInitializer.class.getDeclaredField("adminPassword");
|
||||
Value annotation = field.getAnnotation(Value.class);
|
||||
assertThat(annotation).isNotNull();
|
||||
assertThat(annotation.value())
|
||||
.as("UserDataInitializer must read app.admin.password")
|
||||
.startsWith("${app.admin.password:");
|
||||
}
|
||||
|
||||
private Binder binderFromApplicationYaml() {
|
||||
YamlPropertiesFactoryBean yaml = new YamlPropertiesFactoryBean();
|
||||
yaml.setResources(new ClassPathResource("application.yaml"));
|
||||
Properties props = yaml.getObject();
|
||||
assertThat(props).as("application.yaml must be on the classpath").isNotNull();
|
||||
return new Binder(ConfigurationPropertySources.from(
|
||||
new PropertiesPropertySource("application", props)));
|
||||
}
|
||||
}
|
||||
@@ -224,7 +224,7 @@ services:
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:3000/login >/dev/null 2>&1 || exit 1"]
|
||||
test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:3000/login >/dev/null 2>&1 || exit 1"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
|
||||
@@ -150,6 +150,9 @@ ufw default deny incoming && ufw allow 22/tcp && ufw allow 80/tcp && ufw allow 4
|
||||
apt install caddy
|
||||
|
||||
# Use the Caddyfile from the repo (replace path with the runner's clone target)
|
||||
# CI DEPENDENCY: the nightly and release workflows run `systemctl reload caddy` to
|
||||
# pick up committed Caddyfile changes. They find the file via this symlink — if it
|
||||
# is absent or points elsewhere, the reload succeeds but serves stale config.
|
||||
ln -sf /opt/familienarchiv/infra/caddy/Caddyfile /etc/caddy/Caddyfile
|
||||
systemctl reload caddy
|
||||
|
||||
|
||||
63
docs/adr/012-nsenter-for-host-service-management-in-ci.md
Normal file
63
docs/adr/012-nsenter-for-host-service-management-in-ci.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# ADR-012: nsenter via privileged sibling container for host service management in CI
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) run job steps inside Docker containers under a Docker-out-of-Docker (DooD) setup: the Gitea runner container mounts the host Docker socket, and act_runner spawns a sibling container for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`).
|
||||
|
||||
This architecture has one significant limitation: **job containers cannot manage host services**. Specifically:
|
||||
|
||||
- Job containers are not in the host's PID, mount, UTS, network, or IPC namespaces.
|
||||
- There is no systemd PID 1 inside a job container — `systemctl` has nothing to talk to.
|
||||
- `sudo` is not present in standard container images; even if it were, it would not help.
|
||||
- Caddy runs as a **host systemd service** (not a Docker container), managing TLS certificates via Let's Encrypt. It must be running on the host to serve port 443.
|
||||
|
||||
The deploy workflows need to tell Caddy to reload its config after each deploy so that committed Caddyfile changes are applied before the smoke test validates the public surface. Without a reload step, Caddy silently serves the previous config and the smoke test may pass against stale configuration.
|
||||
|
||||
## Decision
|
||||
|
||||
Use the host Docker socket (already mounted in every job container via `runner-config.yaml`) to spin up a **privileged sibling container** in the host PID namespace, then use `nsenter` to enter all host namespaces and call `systemctl reload caddy`:
|
||||
|
||||
```yaml
|
||||
- name: Reload Caddy
|
||||
run: |
|
||||
docker run --rm --privileged --pid=host \
|
||||
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
|
||||
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
|
||||
```
|
||||
|
||||
`nsenter -t 1 -m -u -n -p -i` enters the init process's mount (`-m`), UTS (`-u`), network (`-n`), PID (`-p`), and IPC (`-i`) namespaces, giving `systemctl` a view of the real host systemd daemon.
|
||||
|
||||
**Alpine is used** instead of Ubuntu: ~5 MB vs ~70 MB pull size, no unnecessary tooling. `util-linux` (which ships `nsenter`) is installed at run time; apk add takes ~1 s on the warm VPS cache. The image digest is pinned so any upstream change requires an explicit Renovate bump PR.
|
||||
|
||||
**`reload` not `restart`**: reload sends SIGHUP so Caddy re-reads its config in-process without dropping TLS connections or in-flight requests.
|
||||
|
||||
**No sudoers entry is required**: the Docker socket already grants root-equivalent host access. This pattern makes existing implicit privileges explicit rather than introducing new ones.
|
||||
|
||||
This decision applies the same pattern to both `nightly.yml` and `release.yml` since both deploy the app stack and must apply Caddyfile changes before smoke-testing the public surface.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
| Alternative | Why rejected |
|
||||
|---|---|
|
||||
| `sudo systemctl reload caddy` in the job container | No systemd PID 1 inside the container — `systemctl` has nothing to connect to. `sudo` is not present in container images and would not help even if it were. |
|
||||
| Caddy admin API (`curl localhost:2019/load`) | Job containers do not share the host network namespace; `localhost:2019` on the host is unreachable. Exposing `:2019` on a host-bound port would add a network attack surface with no benefit over the current approach. |
|
||||
| SSH from the job container to the VPS host | Requires storing an SSH private key as a CI secret, managing authorized_keys on the host, and opening an inbound SSH path from the container. Adds key management overhead for a pattern that the Docker socket already enables more directly. |
|
||||
| Running Caddy as a Docker container (instead of host service) | Caddy manages TLS certificates via Let's Encrypt; running it in Docker complicates certificate persistence and renewal. As a host service, cert storage is straightforward and restarts do not risk rate-limit issues. This would be a larger infrastructure change unrelated to the CI gap. |
|
||||
|
||||
## Consequences
|
||||
|
||||
- The runner host's Docker socket access is now a capability relied upon for host service management, not just for running `docker compose` commands. This is stated explicitly in the YAML comment so future reviewers understand the trust boundary.
|
||||
- The Caddyfile symlink on the VPS (`/etc/caddy/Caddyfile → /opt/familienarchiv/infra/caddy/Caddyfile`) is a required contract for CI to succeed. It is documented in `docs/DEPLOYMENT.md §3.1` and `docs/infrastructure/ci-gitea.md`. If the symlink is absent or mis-pointed, `systemctl reload caddy` succeeds but Caddy serves stale config.
|
||||
- Renovate will create bump PRs when a new Alpine 3.21 digest is published. Because the container runs `--privileged --pid=host`, these bump PRs must be reviewed manually and must not be auto-merged. A `packageRule` in `renovate.json` enforces this.
|
||||
- The step is duplicated between `nightly.yml` and `release.yml` (tracked in issue #539 for extraction into a composite action).
|
||||
- If Caddy is not running when the step executes, `systemctl reload` exits non-zero and the workflow aborts before the smoke test — preventing a misleading "port 443 refused" curl error.
|
||||
|
||||
## References
|
||||
|
||||
- `docs/infrastructure/ci-gitea.md` §"Running host-level commands from CI (nsenter pattern)" — full operational context, troubleshooting guide
|
||||
- `docs/DEPLOYMENT.md` §3.1 — Caddyfile symlink bootstrap step
|
||||
- ADR-011 — single-tenant runner trust model (Docker socket access scope)
|
||||
@@ -4,16 +4,109 @@ This document covers the Gitea Actions CI workflow for Familienarchiv, including
|
||||
|
||||
---
|
||||
|
||||
## Self-Hosted Runner Provisioning
|
||||
## Runner Architecture
|
||||
|
||||
Gitea Actions requires self-hosted runners. GitHub Actions provides `ubuntu-latest` for free; on Gitea you run the runner yourself.
|
||||
Familienarchiv uses **two containers** on the same Hetzner VPS:
|
||||
|
||||
```bash
|
||||
# On the VPS — register a Gitea Actions runner
|
||||
docker run -d --name gitea-runner --restart unless-stopped -v /var/run/docker.sock:/var/run/docker.sock -v gitea-runner-data:/data -e GITEA_INSTANCE_URL=https://gitea.example.com -e GITEA_RUNNER_REGISTRATION_TOKEN=<token-from-gitea-settings> -e GITEA_RUNNER_NAME=vps-runner-1 -e GITEA_RUNNER_LABELS=ubuntu-latest:docker://node:20-bullseye gitea/act_runner:latest
|
||||
| Component | Purpose | Config |
|
||||
|---|---|---|
|
||||
| `gitea` (Docker container) | Hosts Gitea itself | `infra/gitea/docker-compose.yml` |
|
||||
| `gitea-runner` (Docker container) | Runs all CI and deploy jobs | `infra/gitea/docker-compose.yml` + `/root/docker/gitea/runner-config.yaml` |
|
||||
|
||||
Both containers live in the `gitea_gitea` Docker network on the VPS. The runner connects to Gitea via the LAN IP so job containers (which don't share the `gitea_gitea` network) can also reach it.
|
||||
|
||||
### Docker-out-of-Docker (DooD)
|
||||
|
||||
The `gitea-runner` container mounts the host Docker socket (`/var/run/docker.sock`). When a workflow job runs, act_runner spawns a **sibling container** for each job. That job container also gets the Docker socket mounted (via `valid_volumes` in `runner-config.yaml`), enabling `docker compose` calls in workflow steps.
|
||||
|
||||
### Running host-level commands from CI (nsenter pattern)
|
||||
|
||||
Job containers are unprivileged and do not share the host's PID/mount/network namespaces. Commands like `systemctl` that target the host daemon are therefore unavailable by default. When a workflow step needs to manage a host service (e.g. `systemctl reload caddy`), it uses the Docker socket to spin up a **privileged sibling container** in the host PID namespace:
|
||||
|
||||
```yaml
|
||||
- name: Reload Caddy
|
||||
run: |
|
||||
docker run --rm --privileged --pid=host \
|
||||
alpine:3.21@sha256:48b0309ca019d89d40f670aa1bc06e426dc0931948452e8491e3d65087abc07d \
|
||||
sh -c 'apk add --no-cache util-linux -q && nsenter -t 1 -m -u -n -p -i -- /bin/systemctl reload caddy'
|
||||
```
|
||||
|
||||
The runner label `ubuntu-latest` maps to the Docker image it uses -- this is how `runs-on: ubuntu-latest` in the workflow YAML continues to work unchanged.
|
||||
`nsenter -t 1 -m -u -n -p -i` enters the init process's mount (`-m`), UTS (`-u`), network (`-n`), PID (`-p`), and IPC (`-i`) namespaces, giving `systemctl` a view of the real host systemd. No sudoers entry is required — the Docker socket already grants root-equivalent host access.
|
||||
|
||||
Alpine is used instead of Ubuntu: ~5 MB vs ~70 MB, and the digest is pinned to a specific sha256 so any upstream change requires an explicit Renovate bump PR. `util-linux` (which ships `nsenter`) is not part of the Alpine base image but is installed at run time in ~1 s from the warm VPS cache.
|
||||
|
||||
#### Why not `sudo systemctl` in the job container?
|
||||
|
||||
Job containers run as root inside an unprivileged Docker namespace. There is no systemd PID 1 inside the container — `systemctl` would attempt to reach a socket that does not exist. `sudo` is not present in container images and would not help even if it were.
|
||||
|
||||
#### Why not Caddy's admin API?
|
||||
|
||||
Caddy ships a localhost admin API at `:2019` by default. Job containers do not share the host network namespace, so they cannot reach `localhost:2019` on the host. Exposing `:2019` on a host-bound port to make it reachable would add a network attack surface with no benefit over the current approach.
|
||||
|
||||
### Caddyfile symlink contract
|
||||
|
||||
The deploy workflows reload Caddy to pick up committed Caddyfile changes. This relies on a symlink that must exist on the VPS:
|
||||
|
||||
```
|
||||
/etc/caddy/Caddyfile → /opt/familienarchiv/infra/caddy/Caddyfile
|
||||
```
|
||||
|
||||
Created once during server bootstrap (see `docs/DEPLOYMENT.md §3.1`). Verify with:
|
||||
|
||||
```bash
|
||||
ls -la /etc/caddy/Caddyfile
|
||||
# Expected: lrwxrwxrwx ... /etc/caddy/Caddyfile -> /opt/familienarchiv/infra/caddy/Caddyfile
|
||||
```
|
||||
|
||||
### Troubleshooting: Reload Caddy step fails
|
||||
|
||||
**Failure mode 1 — Caddy is stopped**
|
||||
|
||||
Symptom in CI log:
|
||||
```
|
||||
Failed to reload caddy.service: Unit caddy.service is not active.
|
||||
```
|
||||
|
||||
Recovery:
|
||||
```bash
|
||||
ssh root@<vps>
|
||||
systemctl start caddy
|
||||
systemctl status caddy # confirm Active: active (running)
|
||||
```
|
||||
|
||||
Re-run the workflow via Gitea Actions → "Re-run workflow".
|
||||
|
||||
**Failure mode 2 — Caddyfile symlink is missing or mis-pointed**
|
||||
|
||||
This failure is silent — `systemctl reload caddy` exits 0 but Caddy reloads whatever `/etc/caddy/Caddyfile` currently resolves to. The smoke test may then pass against stale config.
|
||||
|
||||
Symptom: smoke test fails on the HSTS value or the `/actuator/health → 404` check despite the Reload Caddy step succeeding.
|
||||
|
||||
Diagnosis:
|
||||
```bash
|
||||
ssh root@<vps>
|
||||
ls -la /etc/caddy/Caddyfile
|
||||
# Should be: lrwxrwxrwx ... /etc/caddy/Caddyfile -> /opt/familienarchiv/infra/caddy/Caddyfile
|
||||
```
|
||||
|
||||
Recovery if symlink is wrong or missing:
|
||||
```bash
|
||||
ln -sf /opt/familienarchiv/infra/caddy/Caddyfile /etc/caddy/Caddyfile
|
||||
systemctl reload caddy
|
||||
```
|
||||
|
||||
**Failure mode 3 — nsenter / Docker socket unavailable**
|
||||
|
||||
Symptom in CI log:
|
||||
```
|
||||
docker: Cannot connect to the Docker daemon at unix:///var/run/docker.sock.
|
||||
```
|
||||
or
|
||||
```
|
||||
nsenter: failed to execute /bin/systemctl: No such file or directory
|
||||
```
|
||||
|
||||
The first error means the Docker socket is not mounted into the job container — check `valid_volumes` in `/root/docker/gitea/runner-config.yaml` on the VPS. The second means the Alpine image is running but cannot enter the host mount namespace; verify `--privileged` and `--pid=host` are both present in the workflow step.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ export const load: PageServerLoad = ({ url }) => {
|
||||
};
|
||||
|
||||
export const actions = {
|
||||
login: async ({ request, cookies, fetch }) => {
|
||||
login: async ({ request, cookies, fetch, url }) => {
|
||||
const data = await request.formData();
|
||||
const email = data.get('email') as string;
|
||||
const password = data.get('password') as string;
|
||||
@@ -37,11 +37,17 @@ export const actions = {
|
||||
return fail(500, { error: getErrorMessage('INTERNAL_ERROR') });
|
||||
}
|
||||
|
||||
// The cookie IS the API credential — promoted to `Authorization: Basic …`
|
||||
// on every browser → backend request by AuthTokenCookieFilter on the
|
||||
// Spring side (see #520). It must be Secure on HTTPS or it leaks
|
||||
// a 24h Basic token on plaintext networks. Dev runs over HTTP and
|
||||
// would silently lose the cookie if we hardcoded secure=true.
|
||||
const isHttps = url.protocol === 'https:';
|
||||
cookies.set('auth_token', authHeader, {
|
||||
path: '/',
|
||||
httpOnly: true,
|
||||
sameSite: 'strict',
|
||||
secure: false, // set to true when HTTPS is available
|
||||
secure: isHttps,
|
||||
maxAge: 60 * 60 * 24
|
||||
});
|
||||
} catch (e) {
|
||||
|
||||
@@ -31,8 +31,16 @@
|
||||
# in application.yaml, /actuator/* is unreachable externally. The internal
|
||||
# Prometheus scrape (future) talks to the backend directly on the docker
|
||||
# network, not via Caddy.
|
||||
@actuator path /actuator/*
|
||||
respond @actuator 404
|
||||
#
|
||||
# Why a `handle` block and not a top-level `respond @matcher`: each archive
|
||||
# vhost has a catch-all `handle { reverse_proxy ... }` that matches every
|
||||
# path including /actuator/*, and Caddy's `handle` blocks are mutually
|
||||
# exclusive. Without our own `handle /actuator/*` the catch-all wins, the
|
||||
# request is proxied to the backend, and Spring Security 302s to /login
|
||||
# instead of Caddy returning 404. See #512.
|
||||
handle /actuator/* {
|
||||
respond 404
|
||||
}
|
||||
}
|
||||
|
||||
(access_log) {
|
||||
|
||||
@@ -5,6 +5,13 @@
|
||||
"matchPackagePatterns": ["^@tiptap/"],
|
||||
"groupName": "tiptap",
|
||||
"automerge": false
|
||||
},
|
||||
{
|
||||
"description": "Digest bumps for images used in privileged CI steps (--privileged --pid=host) must be reviewed manually — a compromised image has root-equivalent host access.",
|
||||
"matchPaths": [".gitea/workflows/**"],
|
||||
"matchUpdateTypes": ["digest"],
|
||||
"automerge": false,
|
||||
"reviewersFromCodeOwners": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user