Compare commits
244 Commits
feat/issue
...
fix/issue-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6ba7254344 | ||
|
|
b2955fb695 | ||
| 5d2888e038 | |||
|
|
3668555421 | ||
|
|
54a8f7f8e9 | ||
|
|
f8f0951bd5 | ||
| c3c1efe5f1 | |||
|
|
e5363913ec | ||
|
|
4d4d5793bb | ||
|
|
9adde3cd89 | ||
|
|
440a191138 | ||
|
|
1873f50f7f | ||
|
|
a4f2047bcc | ||
|
|
09680557ef | ||
|
|
8fcf653cb0 | ||
|
|
a7a80f8c16 | ||
|
|
03d478840b | ||
|
|
6a6a1c4353 | ||
|
|
b57afb9ad2 | ||
|
|
59bc81d353 | ||
|
|
33300e4ad9 | ||
|
|
fe1451f570 | ||
|
|
f2ec81547b | ||
|
|
7e430998b8 | ||
|
|
156afa14a2 | ||
|
|
91f70e652d | ||
|
|
9652894aa4 | ||
|
|
e5d953dee8 | ||
|
|
ba5bd9cb11 | ||
|
|
83565c6bb5 | ||
|
|
a91a3e1f61 | ||
|
|
c523721ce8 | ||
|
|
ad69d7cb83 | ||
|
|
8d27c82e6d | ||
|
|
4eb5eba347 | ||
|
|
47c5f77c81 | ||
|
|
a36f25cfc3 | ||
|
|
c9ac83b2ba | ||
|
|
e4df17f308 | ||
|
|
2eade2b78f | ||
|
|
334b507476 | ||
|
|
59349dfe93 | ||
|
|
56e55ff488 | ||
|
|
ecb930e5f9 | ||
|
|
8b109349c2 | ||
|
|
ebd0f671f9 | ||
|
|
83f022ff4b | ||
|
|
80ccc0f3c6 | ||
|
|
eccecf35e3 | ||
|
|
16f69fff33 | ||
|
|
bb374bf2cd | ||
|
|
1a28e3114d | ||
|
|
915ad9f5c6 | ||
|
|
143622bf27 | ||
|
|
a3906976e8 | ||
|
|
b017da22c3 | ||
|
|
fea837b345 | ||
|
|
a364e3f69b | ||
|
|
7ca44d7df1 | ||
|
|
e975642a4c | ||
|
|
72f422afe2 | ||
|
|
6074480482 | ||
|
|
5512790d5a | ||
|
|
a158048f45 | ||
|
|
ac999066dd | ||
|
|
8b25a5b940 | ||
|
|
265b4f1484 | ||
|
|
bfc3a17676 | ||
|
|
eb54a98ea2 | ||
|
|
3fcdfa85f1 | ||
|
|
cd1c0b210e | ||
|
|
a239c16c31 | ||
|
|
8a8205ad8d | ||
|
|
0430383e1c | ||
|
|
e2d74ff880 | ||
|
|
586eea009b | ||
|
|
7c2c4741ab | ||
|
|
d464bca9f3 | ||
|
|
2283f733cc | ||
|
|
cc20583ae6 | ||
|
|
86d75d91be | ||
|
|
a98ca0e5d3 | ||
|
|
1c515a3145 | ||
|
|
43d36c898c | ||
|
|
60326cfb0a | ||
|
|
e598f5a506 | ||
|
|
e1c78e3fbe | ||
|
|
ae6355d206 | ||
|
|
b5f9fcfdfd | ||
|
|
2f48dfabd1 | ||
|
|
495210052f | ||
|
|
a072701632 | ||
|
|
eac2356948 | ||
|
|
d554fc7e6b | ||
|
|
7bd477d24e | ||
|
|
b1c2132aa6 | ||
|
|
f7eefb525f | ||
|
|
500611925d | ||
|
|
64bcc8d031 | ||
|
|
5a8a1898f8 | ||
|
|
b4f24f4965 | ||
|
|
9e1754bbb0 | ||
|
|
797852b494 | ||
|
|
518334bc38 | ||
|
|
c8b1a890be | ||
|
|
1f592958d7 | ||
|
|
9b5547757a | ||
|
|
92587b050e | ||
|
|
2be2087a95 | ||
|
|
4d9234244e | ||
|
|
9b82621770 | ||
|
|
a58e796ffa | ||
|
|
6a46a1e3eb | ||
|
|
5b645f6374 | ||
|
|
d76ee5fa31 | ||
|
|
5146aeb568 | ||
|
|
9fd1f3cde2 | ||
|
|
5cd6ecc624 | ||
|
|
86de118d63 | ||
|
|
00f35ab675 | ||
|
|
c0a1f04df5 | ||
|
|
7f99c64d45 | ||
|
|
18aaf1f3e8 | ||
|
|
dd0a77a5a2 | ||
|
|
f68d16ef58 | ||
|
|
301cfffd1a | ||
|
|
bf501b7d62 | ||
|
|
5d749b2415 | ||
|
|
1d6016cb19 | ||
|
|
48da819a54 | ||
|
|
153752a901 | ||
|
|
3b6b117c75 | ||
|
|
2e9ce8e1da | ||
|
|
c9be6cc165 | ||
|
|
ffe617dba8 | ||
|
|
47841b9110 | ||
|
|
360db1ae33 | ||
|
|
e5739d7f8e | ||
|
|
219d9a816e | ||
|
|
00682bac4f | ||
|
|
77d282bbeb | ||
|
|
52827ccc87 | ||
|
|
61d1c1793b | ||
|
|
c06987da95 | ||
|
|
5028082da4 | ||
|
|
ea106e9414 | ||
|
|
dfdcacdb85 | ||
|
|
c9fb677499 | ||
|
|
6aceafda8e | ||
|
|
5d92f5a32b | ||
|
|
a6123e1867 | ||
|
|
bd81ff81f9 | ||
|
|
76023a99ed | ||
|
|
e92e9e452e | ||
|
|
59a2faa145 | ||
|
|
8e29f428d7 | ||
|
|
e8fb8150b7 | ||
|
|
6786c0112d | ||
|
|
d43d73f231 | ||
|
|
ad82f2e1e2 | ||
|
|
5fdcc95c3d | ||
|
|
142459b916 | ||
|
|
b31979c4f0 | ||
|
|
1060be7def | ||
|
|
fbf4725e97 | ||
|
|
c90b42d045 | ||
|
|
e61e3797d1 | ||
|
|
ce0c013f0f | ||
|
|
baa0a9811c | ||
|
|
9ef3c82398 | ||
|
|
708fd9d63e | ||
|
|
abe8ab8668 | ||
|
|
e3a3f209f9 | ||
|
|
e877847b7e | ||
|
|
7c25d08506 | ||
|
|
c10e8e8a3a | ||
|
|
0c765d8112 | ||
|
|
cdb54c7545 | ||
|
|
6ab7abb9df | ||
|
|
d28c455991 | ||
|
|
0fa90d58cb | ||
|
|
172bafe202 | ||
|
|
ba0bfc6a7e | ||
|
|
d4b5c14a26 | ||
|
|
e209d4877d | ||
|
|
66c1998d2f | ||
|
|
62bef1d267 | ||
|
|
c3d4762ca0 | ||
|
|
421d7ffd37 | ||
|
|
dbf19037fe | ||
|
|
9387fcc17b | ||
|
|
264db4e1c9 | ||
|
|
12f0e21b21 | ||
|
|
3e33021129 | ||
|
|
32396c6253 | ||
|
|
11b4206fe2 | ||
|
|
eede9f93a7 | ||
|
|
260bb8e164 | ||
|
|
9b82d8e7dd | ||
|
|
ab6117c87e | ||
|
|
b1f9f1603c | ||
|
|
f2a901eabf | ||
|
|
d6ca0f12c9 | ||
|
|
537bfb79f0 | ||
|
|
f74b586f29 | ||
|
|
eb464b351a | ||
|
|
9ad172084a | ||
|
|
0582edd840 | ||
|
|
9986af7c3d | ||
|
|
a4bde0953e | ||
|
|
1b55588aee | ||
|
|
1c560289c8 | ||
|
|
61e58e98ba | ||
|
|
3608a9723a | ||
|
|
63f00ce0a0 | ||
|
|
0a5b290e6c | ||
|
|
ab1a1d1a3d | ||
|
|
9d22a5134f | ||
|
|
883c3381a7 | ||
|
|
f34967f764 | ||
|
|
12487d187f | ||
|
|
d01b9a7508 | ||
|
|
d69a3abc3b | ||
|
|
5c72364899 | ||
|
|
50b18f0849 | ||
|
|
6cf5405b7a | ||
|
|
86c13a230c | ||
|
|
513fda2888 | ||
|
|
995c696c6a | ||
|
|
9b2ed48689 | ||
|
|
a1b89670c0 | ||
|
|
a3c17750cd | ||
|
|
83db80b867 | ||
|
|
a944563560 | ||
|
|
8225baf578 | ||
|
|
bab30fe29c | ||
|
|
69b564b34b | ||
|
|
fc53038af2 | ||
|
|
869885eb78 | ||
|
|
a9b8e19dea | ||
|
|
080e8eb55f | ||
|
|
a5f4b0df31 | ||
|
|
9dae044eec | ||
|
|
5302075124 |
@@ -410,6 +410,23 @@ Never Kafka for teams under 10 or <100k events/day. Never gRPC inside a monolith
|
||||
4. Identify missing database-layer enforcement (constraints, RLS)
|
||||
5. Check transport choices — simpler protocol available?
|
||||
6. Propose a concrete simpler alternative, not just a critique
|
||||
7. Verify documentation currency. For each category below, check whether the PR triggered the update. Flag missing updates as blockers.
|
||||
|
||||
| PR contains | Required doc update |
|
||||
|---|---|
|
||||
| New Flyway migration adding/removing/renaming a table or column | `docs/architecture/db/db-orm.puml` and `docs/architecture/db/db-relationships.puml` |
|
||||
| New `@ManyToMany` join table or FK | Both DB diagrams |
|
||||
| New backend package or domain module | `CLAUDE.md` package table + matching `docs/architecture/c4/l3-backend-*.puml` |
|
||||
| New controller or service in an existing backend domain | Matching `docs/architecture/c4/l3-backend-*.puml` |
|
||||
| New SvelteKit route | `CLAUDE.md` route table + matching `docs/architecture/c4/l3-frontend-*.puml` |
|
||||
| New Docker service or infrastructure component | `docs/architecture/c4/l2-containers.puml` + `docs/DEPLOYMENT.md` |
|
||||
| New external system integrated | `docs/architecture/c4/l1-context.puml` |
|
||||
| Auth or upload flow change | `docs/architecture/c4/seq-auth-flow.puml` or `docs/architecture/c4/seq-document-upload.puml` |
|
||||
| New `ErrorCode` or `Permission` value | `CLAUDE.md` + `docs/ARCHITECTURE.md` |
|
||||
| New domain concept or term | `docs/GLOSSARY.md` |
|
||||
| Architectural decision with lasting consequences | New ADR in `docs/adr/` |
|
||||
|
||||
A doc omission is a blocker, not a concern — the PR does not merge until the diagram or text matches the code.
|
||||
|
||||
### Designing Systems
|
||||
1. Start with the data model — get the schema right before application code
|
||||
|
||||
@@ -980,6 +980,24 @@ Mark with `@pytest.mark.asyncio` so pytest runs the coroutine. Without it, the t
|
||||
5. Refactor — apply clean code, extract if 3+ duplications, rename for intent
|
||||
6. Repeat for the next behavior
|
||||
7. When all behaviors are green, review for SOLID violations across the full stack
|
||||
8. Update documentation before opening the PR. Use the table below to know which doc to touch.
|
||||
|
||||
| What changed in code | Doc(s) to update |
|
||||
|---|---|
|
||||
| New Flyway migration adds/removes/renames a table or column | `docs/architecture/db/db-orm.puml` (add/remove entity or attribute) **and** `docs/architecture/db/db-relationships.puml` (add/remove relationship line) |
|
||||
| New `@ManyToMany` join table or FK relationship | Both DB diagrams above |
|
||||
| New backend package / domain module | `CLAUDE.md` (package structure table) **and** the matching `docs/architecture/c4/l3-backend-*.puml` diagram for that domain |
|
||||
| New Spring Boot controller or service in an existing domain | The matching `docs/architecture/c4/l3-backend-*.puml` for that domain |
|
||||
| New SvelteKit route (`+page.svelte`) | `CLAUDE.md` (route structure section) **and** the matching `docs/architecture/c4/l3-frontend-*.puml` diagram |
|
||||
| New Docker service / infrastructure component | `docs/architecture/c4/l2-containers.puml` **and** `docs/DEPLOYMENT.md` |
|
||||
| New external system integrated (new API, new S3 bucket, etc.) | `docs/architecture/c4/l1-context.puml` |
|
||||
| Auth flow or document-upload flow changes | `docs/architecture/c4/seq-auth-flow.puml` or `docs/architecture/c4/seq-document-upload.puml` |
|
||||
| New `ErrorCode` enum value | `CLAUDE.md` error handling section **and** `CONTRIBUTING.md` |
|
||||
| New `Permission` enum value | `CLAUDE.md` security section **and** `docs/ARCHITECTURE.md` |
|
||||
| New domain term introduced (entity name, status, concept) | `docs/GLOSSARY.md` |
|
||||
| Architectural decision with lasting consequences (new tech, new transport protocol, new pattern) | New ADR in `docs/adr/` |
|
||||
|
||||
Skip a doc only if the change genuinely does not affect what that doc describes.
|
||||
|
||||
### Reviewing Code
|
||||
1. TDD evidence — are there tests? Do they precede the implementation?
|
||||
|
||||
@@ -38,10 +38,10 @@ Screen readers and search engines rely on landmarks to navigate. Every page need
|
||||
|
||||
2. **Use CSS custom properties for all brand colors**
|
||||
```css
|
||||
/* layout.css */
|
||||
--color-ink: #002850;
|
||||
--color-accent: #A6DAD8;
|
||||
--color-surface: #E4E2D7;
|
||||
/* layout.css — semantic tokens backed by CSS variables (see --palette-* for raw values) */
|
||||
--color-ink: var(--c-ink);
|
||||
--color-accent: var(--c-accent);
|
||||
--color-surface: var(--c-surface);
|
||||
```
|
||||
```svelte
|
||||
<div class="text-ink bg-surface border-line">
|
||||
@@ -103,9 +103,9 @@ unsaved work without warning.
|
||||
|
||||
1. **Enforce WCAG AA contrast ratios**
|
||||
```
|
||||
brand-navy (#002850) on white: 14.5:1 -- AAA pass
|
||||
brand-mint (#A6DAD8) on navy: 7.2:1 -- AAA pass for large text
|
||||
Gray-500 on white: check >= 4.5:1 -- AA minimum for body text
|
||||
brand-navy (--palette-navy) on white: ~14.5:1 -- AAA pass (verify exact value in layout.css)
|
||||
brand-mint (--palette-mint) on navy: ~7.2:1 -- AAA pass for large text
|
||||
Gray-500 on white: check >= 4.5:1 -- AA minimum for body text
|
||||
```
|
||||
Always verify contrast with a tool. AA is the floor (4.5:1 normal text, 3:1 large text). Target AAA (7:1) for body copy.
|
||||
|
||||
@@ -134,8 +134,8 @@ Color-blind users (8% of men) cannot distinguish status by color alone. Always p
|
||||
/* Silver #CACAC9 on white = 1.5:1 -- fails all WCAG levels */
|
||||
.caption { color: #CACAC9; }
|
||||
|
||||
/* brand-mint on white = 2.8:1 -- fails AA for normal text */
|
||||
.label { color: #A6DAD8; }
|
||||
/* brand-mint on white = ~2.8:1 -- fails AA for normal text */
|
||||
.label { color: var(--palette-mint); }
|
||||
```
|
||||
Test every text color against its background. Decorative palette colors are for borders and backgrounds, not text.
|
||||
|
||||
@@ -338,7 +338,7 @@ Test at 320px (small phone), 768px (tablet), and 1440px (desktop). Review diffs
|
||||
<table>
|
||||
<tr><td>Section title</td><td><code>text-xs font-bold uppercase tracking-widest</code></td>
|
||||
<td>12px / 700</td><td>Most commonly undersized</td></tr>
|
||||
<tr><td>Card container</td><td><code>bg-white shadow-sm border border-brand-sand rounded-sm p-6</code></td>
|
||||
<tr><td>Card container</td><td><code>bg-surface shadow-sm border border-line rounded-sm p-6</code></td>
|
||||
<td>padding 24px</td><td>—</td></tr>
|
||||
</table>
|
||||
</div>
|
||||
@@ -376,10 +376,10 @@ await page.setViewportSize({ width: 1440, height: 900 });
|
||||
## Domain Expertise
|
||||
|
||||
### Brand Palette
|
||||
- **Primary**: brand-navy `#002850` (text, buttons, headers), brand-mint `#A6DAD8` (accents, hover), brand-sand `#E4E2D7` (backgrounds, borders)
|
||||
- **Typography**: `font-serif` (Merriweather) for body/titles, `font-sans` (Montserrat) for labels/UI chrome
|
||||
- **Card pattern**: `bg-white shadow-sm border border-brand-sand rounded-sm p-6`
|
||||
- **Section title**: `text-xs font-bold uppercase tracking-widest text-gray-400 mb-5`
|
||||
- **Primary**: `brand-navy` (`--palette-navy`) — text, buttons, headers; `brand-mint` (`--palette-mint`) — accents, hover; sand (`--palette-sand`) — page background (use `bg-canvas` or `bg-surface` as Tailwind utilities, not `bg-brand-sand`)
|
||||
- **Typography**: `font-serif` (Tinos) for body/titles, `font-sans` (Montserrat) for labels/UI chrome
|
||||
- **Card pattern**: `bg-surface shadow-sm border border-line rounded-sm p-6`
|
||||
- **Section title**: `text-xs font-bold uppercase tracking-widest text-ink-3 mb-5`
|
||||
|
||||
### Dual-Audience Design (25-42 AND 60+)
|
||||
- Seniors: 16px minimum body text (prefer 18px), 44px touch targets (prefer 48px), redundant cues, calm layouts, persistent navigation, no timed interactions
|
||||
|
||||
@@ -1,96 +1,3 @@
|
||||
# Dev Container — Familienarchiv
|
||||
# Dev Container
|
||||
|
||||
## Overview
|
||||
|
||||
VS Code Dev Container configuration for a pre-configured development environment. Includes Java 21, Maven, and Node.js 24 — everything needed to work on both backend and frontend.
|
||||
|
||||
## Configuration
|
||||
|
||||
File: `.devcontainer/devcontainer.json`
|
||||
|
||||
### Included Features
|
||||
|
||||
| Feature | Version | Purpose |
|
||||
|---|---|---|
|
||||
| Java | 21 | Spring Boot backend |
|
||||
| Maven | bundled with Java feature | Build tool |
|
||||
| Node.js | 24 | SvelteKit frontend |
|
||||
|
||||
### VS Code Extensions (Auto-installed)
|
||||
|
||||
| Extension | Purpose |
|
||||
|---|---|
|
||||
| `vscjava.vscode-java-pack` | Java language support, debugging, testing |
|
||||
| `vmware.vscode-spring-boot` | Spring Boot tooling |
|
||||
| `gabrielbb.vscode-lombok` | Lombok annotation support |
|
||||
| `humao.rest-client` | HTTP request files (for `backend/api_tests/`) |
|
||||
|
||||
### Ports
|
||||
|
||||
- `8080` forwarded to host — access backend at `http://localhost:8080`
|
||||
|
||||
### User
|
||||
|
||||
Runs as `vscode` user (not root) for security.
|
||||
|
||||
## How to Use
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- VS Code with the **Dev Containers** extension installed
|
||||
- Docker running locally
|
||||
|
||||
### Open in Dev Container
|
||||
|
||||
1. Open the project in VS Code
|
||||
2. Press `F1` → type "Dev Containers: Reopen in Container"
|
||||
3. VS Code will:
|
||||
- Build the container using the root `docker-compose.yml`
|
||||
- Install Java 21, Maven, and Node 24
|
||||
- Install the listed extensions
|
||||
- Mount the workspace folder
|
||||
|
||||
### Working Inside the Container
|
||||
|
||||
Once inside the container, you have access to both stacks:
|
||||
|
||||
```bash
|
||||
# Backend
|
||||
cd backend
|
||||
./mvnw spring-boot:run
|
||||
|
||||
# Frontend (in a new terminal)
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
The container reuses the `docker-compose.yml` services, so PostgreSQL and MinIO are available automatically.
|
||||
|
||||
### Forwarding Frontend Port
|
||||
|
||||
The devcontainer config only forwards port 8080 by default. To access the frontend dev server (port 5173 or 3000), either:
|
||||
|
||||
1. Add `5173` to `forwardPorts` in `devcontainer.json`, or
|
||||
2. Use the VS Code "Ports" panel to forward it dynamically
|
||||
|
||||
## Limitations
|
||||
|
||||
- The devcontainer attaches to the `backend` service from `docker-compose.yml`, so it inherits those environment variables
|
||||
- OCR service and other containers should be started separately via `docker-compose up -d`
|
||||
- GPU passthrough for OCR training is not configured
|
||||
|
||||
## Customization
|
||||
|
||||
To add more tools or extensions, edit `.devcontainer/devcontainer.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"features": {
|
||||
"ghcr.io/devcontainers/features/python:1": {
|
||||
"version": "3.11"
|
||||
}
|
||||
},
|
||||
"forwardPorts": [8080, 5173, 3000]
|
||||
}
|
||||
```
|
||||
→ See [.devcontainer/README.md](./README.md) for configuration, usage, and known limitations.
|
||||
|
||||
94
.devcontainer/README.md
Normal file
94
.devcontainer/README.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Dev Container — Familienarchiv
|
||||
|
||||
VS Code Dev Container configuration for a pre-configured development environment. Includes Java 21, Maven, and Node.js 24 — everything needed to work on both backend and frontend.
|
||||
|
||||
## Configuration
|
||||
|
||||
File: `.devcontainer/devcontainer.json`
|
||||
|
||||
### Included Features
|
||||
|
||||
| Feature | Version | Purpose |
|
||||
| ------- | ------------------------- | ------------------- |
|
||||
| Java | 21 | Spring Boot backend |
|
||||
| Maven | bundled with Java feature | Build tool |
|
||||
| Node.js | 24 | SvelteKit frontend |
|
||||
|
||||
### VS Code Extensions (Auto-installed)
|
||||
|
||||
| Extension | Purpose |
|
||||
| --------------------------- | --------------------------------------------- |
|
||||
| `vscjava.vscode-java-pack` | Java language support, debugging, testing |
|
||||
| `vmware.vscode-spring-boot` | Spring Boot tooling |
|
||||
| `gabrielbb.vscode-lombok` | Lombok annotation support |
|
||||
| `humao.rest-client` | HTTP request files (for `backend/api_tests/`) |
|
||||
|
||||
### Ports
|
||||
|
||||
- `8080` forwarded to host — access backend at `http://localhost:8080`
|
||||
|
||||
### User
|
||||
|
||||
Runs as `vscode` user (not root) for security.
|
||||
|
||||
## How to Use
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- VS Code with the **Dev Containers** extension installed
|
||||
- Docker running locally
|
||||
|
||||
### Open in Dev Container
|
||||
|
||||
1. Open the project in VS Code
|
||||
2. Press `F1` → type "Dev Containers: Reopen in Container"
|
||||
3. VS Code will:
|
||||
- Build the container using the root `docker-compose.yml`
|
||||
- Install Java 21, Maven, and Node 24
|
||||
- Install the listed extensions
|
||||
- Mount the workspace folder
|
||||
|
||||
### Working Inside the Container
|
||||
|
||||
Once inside the container, you have access to both stacks:
|
||||
|
||||
```bash
|
||||
# Backend
|
||||
cd backend
|
||||
./mvnw spring-boot:run
|
||||
|
||||
# Frontend (in a new terminal)
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
The container reuses the `docker-compose.yml` services, so PostgreSQL and MinIO are available automatically.
|
||||
|
||||
### Forwarding Frontend Port
|
||||
|
||||
The devcontainer config only forwards port 8080 by default. To access the frontend dev server (port 5173 or 3000), either:
|
||||
|
||||
1. Add `5173` to `forwardPorts` in `devcontainer.json`, or
|
||||
2. Use the VS Code "Ports" panel to forward it dynamically
|
||||
|
||||
## Limitations
|
||||
|
||||
- The devcontainer attaches to the `backend` service from `docker-compose.yml`, so it inherits those environment variables
|
||||
- OCR service and other containers should be started separately via `docker-compose up -d`
|
||||
- GPU passthrough for OCR training is not configured
|
||||
|
||||
## Customization
|
||||
|
||||
To add more tools or extensions, edit `.devcontainer/devcontainer.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"features": {
|
||||
"ghcr.io/devcontainers/features/python:1": {
|
||||
"version": "3.11"
|
||||
}
|
||||
},
|
||||
"forwardPorts": [8080, 5173, 3000]
|
||||
}
|
||||
```
|
||||
@@ -39,6 +39,48 @@ jobs:
|
||||
- name: Run unit and component tests
|
||||
run: npm test
|
||||
working-directory: frontend
|
||||
env:
|
||||
TZ: Europe/Berlin
|
||||
|
||||
- name: Run coverage (server + client)
|
||||
run: npm run test:coverage
|
||||
working-directory: frontend
|
||||
env:
|
||||
TZ: Europe/Berlin
|
||||
|
||||
- name: Upload coverage reports
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage-reports
|
||||
path: frontend/coverage/
|
||||
|
||||
- name: Build frontend
|
||||
run: npm run build
|
||||
working-directory: frontend
|
||||
|
||||
# ── Prerender output is exactly the public help page ───────────────────
|
||||
# SvelteKit prerender + crawl follows nav links and bakes "redirect to
|
||||
# /login" HTML for every protected route, served BEFORE runtime hooks
|
||||
# (see #514). With `crawl: false` only the explicit entry should land
|
||||
# in build/prerendered/. Anything else is a regression — fail the build.
|
||||
- name: Assert prerender output is only /hilfe/transkription
|
||||
run: |
|
||||
cd frontend
|
||||
set -e
|
||||
extra=$(find build/prerendered -type f \
|
||||
-not -path 'build/prerendered/hilfe/*' \
|
||||
-not -name '*.br' -not -name '*.gz' \
|
||||
|| true)
|
||||
if [ -n "$extra" ]; then
|
||||
echo "FAIL: unexpected prerendered files (would shadow runtime hooks):"
|
||||
echo "$extra"
|
||||
exit 1
|
||||
fi
|
||||
# And the help page must still be there.
|
||||
test -f build/prerendered/hilfe/transkription.html \
|
||||
|| { echo "FAIL: /hilfe/transkription.html missing from prerender output"; exit 1; }
|
||||
echo "PASS: only /hilfe/transkription.html prerendered."
|
||||
|
||||
- name: Upload screenshots
|
||||
if: always()
|
||||
@@ -74,6 +116,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DOCKER_API_VERSION: "1.43" # NAS runner runs Docker 24.x (max API 1.43); Testcontainers 2.x defaults to 1.44
|
||||
DOCKER_HOST: unix:///var/run/docker.sock
|
||||
TESTCONTAINERS_RYUK_DISABLED: "true"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
@@ -93,4 +137,123 @@ jobs:
|
||||
run: |
|
||||
chmod +x mvnw
|
||||
./mvnw clean test
|
||||
working-directory: backend
|
||||
working-directory: backend
|
||||
|
||||
# ─── fail2ban Regex Regression ────────────────────────────────────────────────
|
||||
# The filter parses Caddy's JSON access log; a Caddy upgrade that reorders
|
||||
# the JSON keys would silently break it (fail2ban-regex would return
|
||||
# "0 matches", fail2ban would stop banning, no error surface). This job
|
||||
# pins the contract against a deterministic sample line.
|
||||
fail2ban-regex:
|
||||
name: fail2ban Regex
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install fail2ban
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y fail2ban
|
||||
|
||||
- name: Matches /api/auth/login 401
|
||||
run: |
|
||||
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/login"},"status":401}' > /tmp/sample.log
|
||||
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
|
||||
echo "$out"
|
||||
echo "$out" | grep -qE '1 matched' \
|
||||
|| { echo "expected 1 match for /api/auth/login 401"; exit 1; }
|
||||
|
||||
- name: Matches /api/auth/login 429
|
||||
run: |
|
||||
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/login"},"status":429}' > /tmp/sample.log
|
||||
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
|
||||
echo "$out"
|
||||
echo "$out" | grep -qE '1 matched' \
|
||||
|| { echo "expected 1 match for /api/auth/login 429"; exit 1; }
|
||||
|
||||
- name: Matches /api/auth/forgot-password 401
|
||||
run: |
|
||||
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/forgot-password"},"status":401}' > /tmp/sample.log
|
||||
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
|
||||
echo "$out"
|
||||
echo "$out" | grep -qE '1 matched' \
|
||||
|| { echo "expected 1 match for /api/auth/forgot-password 401"; exit 1; }
|
||||
|
||||
- name: Does not match /api/auth/login 200
|
||||
run: |
|
||||
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"POST","host":"archiv.raddatz.cloud","uri":"/api/auth/login"},"status":200}' > /tmp/sample.log
|
||||
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
|
||||
echo "$out"
|
||||
echo "$out" | grep -qE '0 matched' \
|
||||
|| { echo "expected 0 matches for /api/auth/login 200"; exit 1; }
|
||||
|
||||
- name: Does not match /api/documents (unrelated 401)
|
||||
run: |
|
||||
echo '{"level":"info","ts":1700000000.12,"logger":"http.log.access","msg":"handled request","request":{"remote_ip":"203.0.113.42","method":"GET","host":"archiv.raddatz.cloud","uri":"/api/documents"},"status":401}' > /tmp/sample.log
|
||||
out=$(fail2ban-regex /tmp/sample.log infra/fail2ban/filter.d/familienarchiv-auth.conf)
|
||||
echo "$out"
|
||||
echo "$out" | grep -qE '0 matched' \
|
||||
|| { echo "expected 0 matches for /api/documents 401"; exit 1; }
|
||||
|
||||
# ── Backend resolves to file-polling, not systemd ─────────────────────
|
||||
# The Debian/Ubuntu fail2ban package ships defaults-debian.conf with
|
||||
# `[DEFAULT] backend = systemd`. Without `backend = polling` in our
|
||||
# jail, the daemon loads the jail but reads from journald and never
|
||||
# touches /var/log/caddy/access.log — i.e. the regex above passes in
|
||||
# isolation while the live jail is inert. See issue #503.
|
||||
- name: Jail resolves with polling backend (not inherited systemd)
|
||||
run: |
|
||||
sudo ln -sfn "$PWD/infra/fail2ban/jail.d/familienarchiv.conf" /etc/fail2ban/jail.d/familienarchiv.conf
|
||||
sudo ln -sfn "$PWD/infra/fail2ban/filter.d/familienarchiv-auth.conf" /etc/fail2ban/filter.d/familienarchiv-auth.conf
|
||||
dump=$(sudo fail2ban-client -d 2>&1)
|
||||
echo "$dump" | grep -E "add.*familienarchiv-auth" || true
|
||||
echo "$dump" | grep -qE "\['add', 'familienarchiv-auth', 'polling'\]" \
|
||||
|| { echo "FAIL: familienarchiv-auth jail did not resolve to 'polling' backend"; exit 1; }
|
||||
|
||||
# ─── Compose Bucket-Bootstrap Idempotency ─────────────────────────────────────
|
||||
# docker-compose.prod.yml's create-buckets service runs on every
|
||||
# `docker compose up` (one-shot, no restart). Must be idempotent — a
|
||||
# re-deploy must not fail just because the bucket / user / policy
|
||||
# already exists. Validated by running create-buckets twice against a
|
||||
# throwaway minio stack and asserting both invocations exit 0.
|
||||
compose-idempotency:
|
||||
name: Compose Bucket Idempotency
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Write stub env file
|
||||
run: |
|
||||
cat > .env.test <<'EOF'
|
||||
TAG=test
|
||||
PORT_BACKEND=18080
|
||||
PORT_FRONTEND=13000
|
||||
APP_DOMAIN=localhost
|
||||
POSTGRES_PASSWORD=stub
|
||||
MINIO_PASSWORD=stubrootpassword
|
||||
MINIO_APP_PASSWORD=stubapppassword
|
||||
OCR_TRAINING_TOKEN=stub
|
||||
APP_ADMIN_USERNAME=admin@local
|
||||
APP_ADMIN_PASSWORD=stub
|
||||
MAIL_HOST=mailpit
|
||||
MAIL_PORT=1025
|
||||
APP_MAIL_FROM=noreply@local
|
||||
EOF
|
||||
|
||||
- name: Bring up minio
|
||||
run: |
|
||||
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test up -d --wait minio
|
||||
|
||||
- name: First create-buckets run
|
||||
run: |
|
||||
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test run --rm create-buckets
|
||||
|
||||
- name: Second create-buckets run (idempotency check)
|
||||
run: |
|
||||
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test run --rm create-buckets
|
||||
|
||||
- name: Teardown
|
||||
if: always()
|
||||
run: |
|
||||
docker compose -f docker-compose.prod.yml -p test-idem --env-file .env.test down -v
|
||||
rm -f .env.test
|
||||
138
.gitea/workflows/nightly.yml
Normal file
138
.gitea/workflows/nightly.yml
Normal file
@@ -0,0 +1,138 @@
|
||||
name: nightly
|
||||
|
||||
# Builds and deploys the staging environment from main every night.
|
||||
# Runs on the self-hosted runner using Docker-out-of-Docker (the docker
|
||||
# socket is mounted in), so `docker compose build` produces images on
|
||||
# the host daemon and `docker compose up` consumes them directly — no
|
||||
# registry hop.
|
||||
#
|
||||
# Operational assumptions (see docs/DEPLOYMENT.md §3 for the full setup):
|
||||
#
|
||||
# 1. Single-tenant self-hosted runner. The "Write staging env file" step
|
||||
# writes every secret to .env.staging on the runner filesystem; the
|
||||
# `if: always()` cleanup step removes it. A multi-tenant runner
|
||||
# would need to switch to docker compose --env-file <(stdin) instead.
|
||||
#
|
||||
# 2. Host docker layer cache is authoritative. There is no
|
||||
# actions/cache; we rely on the host daemon to keep Maven and npm
|
||||
# layers warm between runs. A `docker system prune` on the host
|
||||
# will cause the next nightly build to be cold (5–10 min slower).
|
||||
#
|
||||
# Staging environment isolation:
|
||||
# - project name: archiv-staging
|
||||
# - host ports: backend 8081, frontend 3001
|
||||
# - profile: staging (starts mailpit instead of a real SMTP relay)
|
||||
#
|
||||
# Required Gitea secrets:
|
||||
# STAGING_POSTGRES_PASSWORD
|
||||
# STAGING_MINIO_PASSWORD
|
||||
# STAGING_MINIO_APP_PASSWORD
|
||||
# STAGING_OCR_TRAINING_TOKEN
|
||||
# STAGING_APP_ADMIN_USERNAME
|
||||
# STAGING_APP_ADMIN_PASSWORD
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 2 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Ensures the backend Dockerfile's `RUN --mount=type=cache` lines are
|
||||
# honoured (Maven cache survives between runs).
|
||||
DOCKER_BUILDKIT: "1"
|
||||
|
||||
jobs:
|
||||
deploy-staging:
|
||||
# `ubuntu-latest` matches our self-hosted runner's advertised label
|
||||
# (the runner has labels: ubuntu-latest / ubuntu-24.04 / ubuntu-22.04).
|
||||
# `self-hosted` would never match — no runner advertises it — so the
|
||||
# job parks in the queue forever. ADR-011's "single-tenant" promise
|
||||
# is at the repo level; sharing this runner between CI and deploys
|
||||
# for the same repo is within that boundary.
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Write staging env file
|
||||
run: |
|
||||
cat > .env.staging <<EOF
|
||||
TAG=nightly
|
||||
PORT_BACKEND=8081
|
||||
PORT_FRONTEND=3001
|
||||
APP_DOMAIN=staging.raddatz.cloud
|
||||
POSTGRES_PASSWORD=${{ secrets.STAGING_POSTGRES_PASSWORD }}
|
||||
MINIO_PASSWORD=${{ secrets.STAGING_MINIO_PASSWORD }}
|
||||
MINIO_APP_PASSWORD=${{ secrets.STAGING_MINIO_APP_PASSWORD }}
|
||||
OCR_TRAINING_TOKEN=${{ secrets.STAGING_OCR_TRAINING_TOKEN }}
|
||||
APP_ADMIN_USERNAME=${{ secrets.STAGING_APP_ADMIN_USERNAME }}
|
||||
APP_ADMIN_PASSWORD=${{ secrets.STAGING_APP_ADMIN_PASSWORD }}
|
||||
MAIL_HOST=mailpit
|
||||
MAIL_PORT=1025
|
||||
MAIL_USERNAME=
|
||||
MAIL_PASSWORD=
|
||||
MAIL_SMTP_AUTH=false
|
||||
MAIL_STARTTLS_ENABLE=false
|
||||
APP_MAIL_FROM=noreply@staging.raddatz.cloud
|
||||
EOF
|
||||
|
||||
- name: Build images
|
||||
# `--pull` forces re-fetching pinned base images so a CVE
|
||||
# re-publication of the same tag (e.g. node:20.19.0-alpine3.21,
|
||||
# postgres:16-alpine) is picked up instead of being served
|
||||
# from the host's stale Docker layer cache.
|
||||
run: |
|
||||
docker compose \
|
||||
-f docker-compose.prod.yml \
|
||||
-p archiv-staging \
|
||||
--env-file .env.staging \
|
||||
--profile staging \
|
||||
build --pull
|
||||
|
||||
- name: Deploy staging
|
||||
run: |
|
||||
docker compose \
|
||||
-f docker-compose.prod.yml \
|
||||
-p archiv-staging \
|
||||
--env-file .env.staging \
|
||||
--profile staging \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Smoke test deployed environment
|
||||
# Healthchecks confirm containers are healthy; they do NOT confirm the
|
||||
# public surface works. This step catches: Caddy not reloaded, HSTS
|
||||
# header dropped, /actuator block bypassed.
|
||||
#
|
||||
# --resolve pins staging.raddatz.cloud to the runner's loopback so we
|
||||
# do NOT depend on the host router doing hairpin NAT (many SOHO
|
||||
# routers do not, or do so only after a firmware update). SNI still
|
||||
# uses the public hostname so the cert validates correctly.
|
||||
run: |
|
||||
set -e
|
||||
HOST="staging.raddatz.cloud"
|
||||
URL="https://$HOST"
|
||||
RESOLVE="--resolve $HOST:443:127.0.0.1"
|
||||
echo "Smoke test: $URL (pinned to 127.0.0.1)"
|
||||
curl -fsS $RESOLVE --max-time 10 "$URL/login" -o /dev/null
|
||||
# Pin the preload-list-eligible HSTS value, not just header presence:
|
||||
# a degraded `max-age=1` or a dropped `includeSubDomains; preload` must
|
||||
# fail this check rather than pass it silently.
|
||||
curl -fsS $RESOLVE --max-time 10 -I "$URL/" \
|
||||
| grep -Eqi 'strict-transport-security:[[:space:]]*max-age=31536000.*includeSubDomains.*preload'
|
||||
# Permissions-Policy denies APIs the app does not use (camera,
|
||||
# microphone, geolocation). A regression that loosens or drops the
|
||||
# header now fails the smoke step.
|
||||
curl -fsS $RESOLVE --max-time 10 -I "$URL/" \
|
||||
| grep -Eqi 'permissions-policy:[[:space:]]*camera=\(\),[[:space:]]*microphone=\(\),[[:space:]]*geolocation=\(\)'
|
||||
status=$(curl -s $RESOLVE -o /dev/null -w "%{http_code}" --max-time 10 "$URL/actuator/health")
|
||||
[ "$status" = "404" ] || { echo "expected 404 from /actuator/health, got $status"; exit 1; }
|
||||
echo "All smoke checks passed"
|
||||
|
||||
- name: Cleanup env file
|
||||
# LOAD-BEARING: `if: always()` is the linchpin of the ADR-011
|
||||
# single-tenant runner trust model. Every secret in .env.staging
|
||||
# is plain text on the runner filesystem until this step runs.
|
||||
# If a future refactor drops `if: always()`, a failed deploy
|
||||
# leaves the env-file behind. Do not remove this conditional
|
||||
# without first re-evaluating ADR-011.
|
||||
if: always()
|
||||
run: rm -f .env.staging
|
||||
128
.gitea/workflows/release.yml
Normal file
128
.gitea/workflows/release.yml
Normal file
@@ -0,0 +1,128 @@
|
||||
name: release
|
||||
|
||||
# Builds and deploys the production environment on `v*` tag push.
|
||||
# Runs on the self-hosted runner via Docker-out-of-Docker; images are
|
||||
# tagged with the actual git tag (e.g. v1.0.0) so rollback is
|
||||
# `TAG=<previous> docker compose -f docker-compose.prod.yml -p archiv-production up -d --wait`
|
||||
#
|
||||
# Operational assumptions (see docs/DEPLOYMENT.md §3 for the full setup):
|
||||
#
|
||||
# 1. Single-tenant self-hosted runner. The "Write production env file"
|
||||
# step writes every secret to .env.production on the runner
|
||||
# filesystem; the `if: always()` cleanup step removes it. A
|
||||
# multi-tenant runner would need to switch to
|
||||
# `docker compose --env-file <(stdin)` instead.
|
||||
#
|
||||
# 2. Host docker layer cache is authoritative. There is no
|
||||
# actions/cache; we rely on the host daemon to keep Maven and npm
|
||||
# layers warm between runs. A `docker system prune` on the host
|
||||
# will cause the next release build to be cold (5–10 min slower).
|
||||
#
|
||||
# Production environment:
|
||||
# - project name: archiv-production
|
||||
# - host ports: backend 8080, frontend 3000
|
||||
# - profile: (none) — mailpit is excluded; real SMTP relay is used
|
||||
#
|
||||
# Required Gitea secrets:
|
||||
# PROD_POSTGRES_PASSWORD
|
||||
# PROD_MINIO_PASSWORD
|
||||
# PROD_MINIO_APP_PASSWORD
|
||||
# PROD_OCR_TRAINING_TOKEN
|
||||
# PROD_APP_ADMIN_USERNAME (CRITICAL: see docs/DEPLOYMENT.md)
|
||||
# PROD_APP_ADMIN_PASSWORD (CRITICAL: locked in on first deploy)
|
||||
# MAIL_HOST
|
||||
# MAIL_PORT
|
||||
# MAIL_USERNAME
|
||||
# MAIL_PASSWORD
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
|
||||
env:
|
||||
DOCKER_BUILDKIT: "1"
|
||||
|
||||
jobs:
|
||||
deploy-production:
|
||||
# See nightly.yml — same rationale: `ubuntu-latest` matches the
|
||||
# advertised label of our single-tenant self-hosted runner.
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Write production env file
|
||||
run: |
|
||||
cat > .env.production <<EOF
|
||||
TAG=${{ gitea.ref_name }}
|
||||
PORT_BACKEND=8080
|
||||
PORT_FRONTEND=3000
|
||||
APP_DOMAIN=archiv.raddatz.cloud
|
||||
POSTGRES_PASSWORD=${{ secrets.PROD_POSTGRES_PASSWORD }}
|
||||
MINIO_PASSWORD=${{ secrets.PROD_MINIO_PASSWORD }}
|
||||
MINIO_APP_PASSWORD=${{ secrets.PROD_MINIO_APP_PASSWORD }}
|
||||
OCR_TRAINING_TOKEN=${{ secrets.PROD_OCR_TRAINING_TOKEN }}
|
||||
APP_ADMIN_USERNAME=${{ secrets.PROD_APP_ADMIN_USERNAME }}
|
||||
APP_ADMIN_PASSWORD=${{ secrets.PROD_APP_ADMIN_PASSWORD }}
|
||||
MAIL_HOST=${{ secrets.MAIL_HOST }}
|
||||
MAIL_PORT=${{ secrets.MAIL_PORT }}
|
||||
MAIL_USERNAME=${{ secrets.MAIL_USERNAME }}
|
||||
MAIL_PASSWORD=${{ secrets.MAIL_PASSWORD }}
|
||||
MAIL_SMTP_AUTH=true
|
||||
MAIL_STARTTLS_ENABLE=true
|
||||
APP_MAIL_FROM=noreply@raddatz.cloud
|
||||
EOF
|
||||
|
||||
- name: Build images
|
||||
# `--pull` forces re-fetching pinned base images so a CVE
|
||||
# re-publication of the same tag is picked up rather than served
|
||||
# from the host's stale Docker layer cache.
|
||||
run: |
|
||||
docker compose \
|
||||
-f docker-compose.prod.yml \
|
||||
-p archiv-production \
|
||||
--env-file .env.production \
|
||||
build --pull
|
||||
|
||||
- name: Deploy production
|
||||
run: |
|
||||
docker compose \
|
||||
-f docker-compose.prod.yml \
|
||||
-p archiv-production \
|
||||
--env-file .env.production \
|
||||
up -d --wait --remove-orphans
|
||||
|
||||
- name: Smoke test deployed environment
|
||||
# See nightly.yml — same three checks, against the prod vhost.
|
||||
# --resolve pins archiv.raddatz.cloud to the runner's loopback so
|
||||
# the smoke test does NOT depend on hairpin NAT on the host router.
|
||||
run: |
|
||||
set -e
|
||||
HOST="archiv.raddatz.cloud"
|
||||
URL="https://$HOST"
|
||||
RESOLVE="--resolve $HOST:443:127.0.0.1"
|
||||
echo "Smoke test: $URL (pinned to 127.0.0.1)"
|
||||
curl -fsS $RESOLVE --max-time 10 "$URL/login" -o /dev/null
|
||||
# Pin the preload-list-eligible HSTS value, not just header presence:
|
||||
# a degraded `max-age=1` or a dropped `includeSubDomains; preload` must
|
||||
# fail this check rather than pass it silently.
|
||||
curl -fsS $RESOLVE --max-time 10 -I "$URL/" \
|
||||
| grep -Eqi 'strict-transport-security:[[:space:]]*max-age=31536000.*includeSubDomains.*preload'
|
||||
# Permissions-Policy denies APIs the app does not use (camera,
|
||||
# microphone, geolocation). A regression that loosens or drops the
|
||||
# header now fails the smoke step.
|
||||
curl -fsS $RESOLVE --max-time 10 -I "$URL/" \
|
||||
| grep -Eqi 'permissions-policy:[[:space:]]*camera=\(\),[[:space:]]*microphone=\(\),[[:space:]]*geolocation=\(\)'
|
||||
status=$(curl -s $RESOLVE -o /dev/null -w "%{http_code}" --max-time 10 "$URL/actuator/health")
|
||||
[ "$status" = "404" ] || { echo "expected 404 from /actuator/health, got $status"; exit 1; }
|
||||
echo "All smoke checks passed"
|
||||
|
||||
- name: Cleanup env file
|
||||
# LOAD-BEARING: `if: always()` is the linchpin of the ADR-011
|
||||
# single-tenant runner trust model. Every secret in
|
||||
# .env.production is plain text on the runner filesystem until
|
||||
# this step runs. If a future refactor drops `if: always()`, a
|
||||
# failed deploy leaves the env-file behind. Do not remove this
|
||||
# conditional without first re-evaluating ADR-011.
|
||||
if: always()
|
||||
run: rm -f .env.production
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -18,5 +18,11 @@ scripts/large-data.sql
|
||||
.claude/worktrees/
|
||||
.claude/scheduled_tasks.lock
|
||||
|
||||
# Run artifacts from verification tooling
|
||||
proofshot-artifacts/
|
||||
|
||||
# Root-level Node.js tooling artifacts
|
||||
node_modules/
|
||||
|
||||
# Repo uses npm; yarn.lock is ignored to avoid double-lockfile drift.
|
||||
frontend/yarn.lock
|
||||
|
||||
4
.vscode/settings.json
vendored
4
.vscode/settings.json
vendored
@@ -1,4 +1,6 @@
|
||||
{
|
||||
"java.configuration.updateBuildConfiguration": "interactive",
|
||||
"java.compile.nullAnalysis.mode": "automatic"
|
||||
"java.compile.nullAnalysis.mode": "automatic",
|
||||
"plantuml.render": "PlantUMLServer",
|
||||
"plantuml.server": "http://heim-nas:8500"
|
||||
}
|
||||
241
CLAUDE.md
241
CLAUDE.md
@@ -1,7 +1,11 @@
|
||||
# CLAUDE.md
|
||||
|
||||
> For a human-readable project overview, see [README.md](./README.md).
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
> For a human-readable project overview, see [README.md](./README.md).
|
||||
|
||||
## Project Overview
|
||||
|
||||
**Familienarchiv** is a family document archival system — a full-stack web app for digitizing, organizing, and searching family documents. Key features: file uploads (stored in MinIO/S3), metadata management, Excel/ODS batch import, full-text search, conversation threads between family members, and role-based access control.
|
||||
@@ -16,6 +20,8 @@ See [CODESTYLE.md](./CODESTYLE.md) for coding standards: Clean Code, DRY/KISS tr
|
||||
|
||||
## Stack
|
||||
|
||||
→ See [README.md §Tech Stack](./README.md#tech-stack)
|
||||
|
||||
- **Backend**: Spring Boot 4.0 (Java 21, Maven, Jetty, JPA/Hibernate, Flyway, Spring Security, Spring Session JDBC)
|
||||
- **Frontend**: SvelteKit 2 with Svelte 5, TypeScript, Tailwind CSS 4, Paraglide.js (i18n: de/en/es)
|
||||
- **Database**: PostgreSQL 16
|
||||
@@ -25,12 +31,13 @@ See [CODESTYLE.md](./CODESTYLE.md) for coding standards: Clean Code, DRY/KISS tr
|
||||
## Common Commands
|
||||
|
||||
### Running the Full Stack
|
||||
|
||||
```bash
|
||||
# From repo root — starts PostgreSQL, MinIO, and Spring Boot backend
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
### Backend (Spring Boot)
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
|
||||
@@ -42,11 +49,12 @@ cd backend
|
||||
```
|
||||
|
||||
### Frontend (SvelteKit)
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
|
||||
npm install
|
||||
npm run dev # Dev server (port 3000)
|
||||
npm run dev # Dev server (port 5173)
|
||||
npm run build # Production build
|
||||
npm run preview # Preview production build
|
||||
|
||||
@@ -64,7 +72,7 @@ npm run generate:api # Regenerate TypeScript API types from OpenAPI spec
|
||||
|
||||
### Package Structure
|
||||
|
||||
Package-by-domain: each domain owns its controller, service, repository, entities, and DTOs.
|
||||
<!-- TODO: rewrite post-REFACTOR-1 — see Epic 4 -->
|
||||
|
||||
```
|
||||
backend/src/main/java/org/raddatz/familienarchiv/
|
||||
@@ -88,27 +96,21 @@ backend/src/main/java/org/raddatz/familienarchiv/
|
||||
└── user/ User domain — AppUser, UserGroup, UserService, auth controllers
|
||||
```
|
||||
|
||||
### Layering Rules (strictly enforced)
|
||||
### Layering Rules
|
||||
|
||||
```
|
||||
Controller → Service → Repository → DB
|
||||
```
|
||||
→ See [docs/ARCHITECTURE.md §Layering rule](./docs/ARCHITECTURE.md#layering-rule)
|
||||
|
||||
- **Controllers** never inject or call repositories directly.
|
||||
- **Services** never reach into another domain's repository. Call the other domain's service instead.
|
||||
- ✅ `DocumentService` → `PersonService.getById()` → `PersonRepository`
|
||||
- ❌ `DocumentService` → `PersonRepository` directly
|
||||
- This keeps domain boundaries clear and business logic testable in isolation.
|
||||
**LLM reminder:** controllers never call repositories directly; services never reach into another domain's repository — always call the other domain's service instead.
|
||||
|
||||
### Domain Model
|
||||
|
||||
| Entity | Table | Key relationships |
|
||||
|---|---|---|
|
||||
| `Document` | `documents` | ManyToOne `sender` (Person), ManyToMany `receivers` (Person), ManyToMany `tags` (Tag) |
|
||||
| `Person` | `persons` | Referenced by documents as sender/receiver |
|
||||
| `Tag` | `tag` | ManyToMany with documents via `document_tags` |
|
||||
| `AppUser` | `app_users` | ManyToMany `groups` (UserGroup) |
|
||||
| `UserGroup` | `user_groups` | Has a `Set<String> permissions` |
|
||||
| Entity | Table | Key relationships |
|
||||
| ----------- | ------------- | ------------------------------------------------------------------------------------- |
|
||||
| `Document` | `documents` | ManyToOne `sender` (Person), ManyToMany `receivers` (Person), ManyToMany `tags` (Tag) |
|
||||
| `Person` | `persons` | Referenced by documents as sender/receiver |
|
||||
| `Tag` | `tag` | ManyToMany with documents via `document_tags` |
|
||||
| `AppUser` | `app_users` | ManyToMany `groups` (UserGroup) |
|
||||
| `UserGroup` | `user_groups` | Has a `Set<String> permissions` |
|
||||
|
||||
**`DocumentStatus` lifecycle:** `PLACEHOLDER → UPLOADED → TRANSCRIBED → REVIEWED → ARCHIVED`
|
||||
|
||||
@@ -118,6 +120,7 @@ Controller → Service → Repository → DB
|
||||
### Entity Code Style
|
||||
|
||||
All entities use these Lombok annotations:
|
||||
|
||||
```java
|
||||
@Entity
|
||||
@Table(name = "table_name")
|
||||
@@ -146,65 +149,29 @@ Services are annotated with `@Service`, `@RequiredArgsConstructor`, and optional
|
||||
- Read methods are not annotated (default non-transactional is fine).
|
||||
- Each service owns its domain's repository. Cross-domain data access goes through the other domain's service.
|
||||
|
||||
**Existing services:**
|
||||
|
||||
| Service | Responsibility |
|
||||
|---|---|
|
||||
| `DocumentService` | Document CRUD, search, tag cascade delete |
|
||||
| `PersonService` | Person CRUD, find-or-create by alias |
|
||||
| `TagService` | Tag find/create/update/delete |
|
||||
| `UserService` | User and group CRUD |
|
||||
| `FileService` | S3/MinIO upload and download |
|
||||
| `MassImportService` | Async ODS/Excel import; delegates to PersonService and TagService |
|
||||
|
||||
### DTOs
|
||||
|
||||
Input DTOs live in `dto/`. Response types are the model entities themselves (no response DTOs).
|
||||
Input DTOs live flat in the domain package. Response types are the model entities themselves (no response DTOs).
|
||||
|
||||
- `DocumentUpdateDTO` — used for both create and update (all fields optional)
|
||||
- `CreateUserRequest` — user creation
|
||||
- `GroupDTO` — group create/update
|
||||
- `@Schema(requiredMode = REQUIRED)` on every field the backend always populates — drives TypeScript generation.
|
||||
|
||||
### Error Handling
|
||||
|
||||
Use `DomainException` for all domain errors. Never throw raw exceptions from service methods.
|
||||
→ See [CONTRIBUTING.md §Error handling](./CONTRIBUTING.md#error-handling)
|
||||
|
||||
```java
|
||||
// Static factories match common HTTP status codes:
|
||||
DomainException.notFound(ErrorCode.DOCUMENT_NOT_FOUND, "Document not found: " + id)
|
||||
DomainException.forbidden("Access denied")
|
||||
DomainException.conflict(ErrorCode.IMPORT_ALREADY_RUNNING, "Already running")
|
||||
DomainException.internal(ErrorCode.FILE_UPLOAD_FAILED, "Upload failed: " + e.getMessage())
|
||||
```
|
||||
|
||||
`ErrorCode` is an enum in `exception/ErrorCode.java`. When adding a new error case, add the value there **and** mirror it in the frontend's `src/lib/errors.ts` + add a Paraglide translation key.
|
||||
|
||||
For simple validation in controllers (not domain logic), `ResponseStatusException` is acceptable:
|
||||
```java
|
||||
throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "firstName is required");
|
||||
```
|
||||
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` from service methods — never throw raw exceptions. When adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) mirror in `frontend/src/lib/shared/errors.ts`, (3) add i18n keys in `messages/{de,en,es}.json`.
|
||||
|
||||
### Security / Permissions
|
||||
|
||||
Use `@RequirePermission` on controller methods (or the whole controller class):
|
||||
→ See [docs/ARCHITECTURE.md §Permission system](./docs/ARCHITECTURE.md#permission-system)
|
||||
|
||||
```java
|
||||
@RequirePermission(Permission.WRITE_ALL)
|
||||
public Document updateDocument(...) { ... }
|
||||
```
|
||||
|
||||
Available permissions: `READ_ALL`, `WRITE_ALL`, `ADMIN`, `ADMIN_USER`, `ADMIN_TAG`, `ADMIN_PERMISSION`
|
||||
|
||||
`PermissionAspect` (AOP) checks the current user's `UserGroup.permissions` at runtime.
|
||||
**LLM reminder:** `@RequirePermission(Permission.WRITE_ALL)` is **required** on every `POST`, `PUT`, `PATCH`, `DELETE` endpoint — not optional. Do not mix with Spring Security's `@PreAuthorize`. Available permissions: `READ_ALL`, `WRITE_ALL`, `ADMIN`, `ADMIN_USER`, `ADMIN_TAG`, `ADMIN_PERMISSION`, `ANNOTATE_ALL`, `BLOG_WRITE`.
|
||||
|
||||
### OpenAPI / API Types
|
||||
|
||||
SpringDoc generates the spec at `/v3/api-docs` (only accessible when running with `--spring.profiles.active=dev`).
|
||||
→ See [CONTRIBUTING.md §Walkthrough B — Add a new endpoint](./CONTRIBUTING.md#4-walkthrough-b--add-a-new-endpoint)
|
||||
|
||||
When changing any model field or endpoint:
|
||||
1. Rebuild the backend JAR with `-DskipTests`
|
||||
2. Start it with `--spring.profiles.active=dev`
|
||||
3. Run `npm run generate:api` in `frontend/`
|
||||
**LLM reminder:** always run `npm run generate:api` in `frontend/` after any backend model or endpoint change — this is the most common cause of TypeScript type errors.
|
||||
|
||||
---
|
||||
|
||||
@@ -214,147 +181,99 @@ When changing any model field or endpoint:
|
||||
|
||||
```
|
||||
frontend/src/routes/
|
||||
├── +layout.svelte Global header (sticky), nav links, logout
|
||||
├── +layout.server.ts Loads current user, injects auth cookie
|
||||
├── +page.svelte Home / document search
|
||||
├── +page.server.ts Load: search documents; no actions
|
||||
├── +layout.svelte / +layout.server.ts Global layout, auth cookie
|
||||
├── +page.svelte / +page.server.ts Home / document search dashboard
|
||||
├── documents/
|
||||
│ ├── [id]/+page.svelte Document detail (view + file preview)
|
||||
│ └── [id]/edit/ Edit form (all metadata + file upload)
|
||||
│ └── new/ Create form (same fields, empty)
|
||||
│ ├── [id]/ Document detail (view + file preview)
|
||||
│ ├── [id]/edit/ Edit form (all metadata + file upload)
|
||||
│ ├── new/ Upload form
|
||||
│ └── bulk-edit/ Multi-document edit
|
||||
├── persons/
|
||||
│ ├── +page.svelte Person list with search
|
||||
│ ├── [id]/+page.svelte Person detail (inline edit + merge)
|
||||
│ ├── [id]/ Person detail
|
||||
│ ├── [id]/edit/ Person edit form
|
||||
│ └── new/ Create person form
|
||||
├── conversations/ Bilateral conversation timeline
|
||||
├── admin/ User + group + tag management
|
||||
└── login/ logout/ Auth pages
|
||||
├── briefwechsel/ Bilateral conversation timeline (Briefwechsel)
|
||||
├── aktivitaeten/ Unified activity feed (Chronik)
|
||||
├── geschichten/ Stories — list, [id], [id]/edit, new
|
||||
├── stammbaum/ Family tree (Stammbaum)
|
||||
├── enrich/ Enrichment workflow — [id], done
|
||||
├── admin/ User, group, tag, OCR, system management
|
||||
├── hilfe/transkription/ Transcription help page
|
||||
├── profile/ User profile settings
|
||||
├── users/[id]/ Public user profile page
|
||||
├── login/ logout/ register/
|
||||
├── forgot-password/ reset-password/
|
||||
└── demo/ Dev-only demos
|
||||
```
|
||||
|
||||
### API Client Pattern
|
||||
|
||||
All server-side API calls use the typed client from `$lib/api.server.ts`:
|
||||
→ See [CONTRIBUTING.md §Frontend API client](./CONTRIBUTING.md#frontend-api-client)
|
||||
|
||||
```typescript
|
||||
const api = createApiClient(fetch);
|
||||
const result = await api.GET('/api/persons/{id}', { params: { path: { id } } });
|
||||
|
||||
// Always check via response.ok, NOT result.error
|
||||
if (!result.response.ok) {
|
||||
const code = (result.error as unknown as { code?: string })?.code;
|
||||
throw error(result.response.status, getErrorMessage(code));
|
||||
}
|
||||
return { person: result.data! };
|
||||
```
|
||||
|
||||
Key rules:
|
||||
- Use `!result.response.ok` for error checking (not `if (result.error)` — this breaks when the spec has no error responses defined)
|
||||
- Cast errors as `result.error as unknown as { code?: string }` to extract the backend error code
|
||||
- Use `result.data!` (non-null assertion) after an ok check — TypeScript knows it's present
|
||||
|
||||
For multipart/form-data endpoints (file uploads), bypass the typed client and use raw `fetch`:
|
||||
```typescript
|
||||
const res = await fetch(`${baseUrl}/api/documents`, { method: 'POST', body: formData });
|
||||
```
|
||||
**LLM reminder:** check `!result.response.ok` (not `result.error` — breaks when spec has no error responses defined); cast errors as `result.error as unknown as { code?: string }`; use `result.data!` after an ok check.
|
||||
|
||||
### Form Actions Pattern
|
||||
|
||||
```typescript
|
||||
// +page.server.ts
|
||||
export const actions = {
|
||||
default: async ({ request, fetch }) => {
|
||||
const formData = await request.formData();
|
||||
const name = formData.get('name') as string; // cast needed — FormData returns FormDataEntryValue
|
||||
// ...
|
||||
return fail(400, { error: 'message' }); // on error
|
||||
throw redirect(303, '/target'); // on success
|
||||
}
|
||||
default: async ({ request, fetch }) => {
|
||||
const formData = await request.formData();
|
||||
const name = formData.get("name") as string;
|
||||
// ...
|
||||
return fail(400, { error: "message" }); // on error
|
||||
throw redirect(303, "/target"); // on success
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
### Date Handling
|
||||
|
||||
- **Forms**: German format `dd.mm.yyyy` with auto-dot insertion via `handleDateInput()`. A hidden `<input type="hidden" name="documentDate" value={dateIso}>` sends ISO format to the backend.
|
||||
- **Display**: Always use `Intl.DateTimeFormat` with `T12:00:00` suffix to prevent UTC timezone off-by-one:
|
||||
```typescript
|
||||
new Intl.DateTimeFormat('de-DE', { day: 'numeric', month: 'long', year: 'numeric' })
|
||||
.format(new Date(doc.documentDate + 'T12:00:00'))
|
||||
```
|
||||
→ See [CONTRIBUTING.md §Date handling](./CONTRIBUTING.md#date-handling)
|
||||
|
||||
**LLM reminder:** always append `T12:00:00` when constructing `new Date()` from an ISO date string — prevents UTC timezone off-by-one errors.
|
||||
|
||||
### UI Component Library
|
||||
|
||||
Custom components in `src/lib/components/`:
|
||||
|
||||
| Component | Props | Description |
|
||||
|---|---|---|
|
||||
| `PersonTypeahead` | `name`, `label`, `value`, `initialName`, `on:change` | Single-person selector with typeahead dropdown |
|
||||
| `PersonMultiSelect` | `selectedPersons` (bind) | Chip-based multi-person selector |
|
||||
| `TagInput` | `tags` (bind), `allowCreation?`, `on:change` | Tag chip input with typeahead |
|
||||
→ See per-domain READMEs: [`frontend/src/lib/person/README.md`](./frontend/src/lib/person/README.md), [`frontend/src/lib/tag/README.md`](./frontend/src/lib/tag/README.md), [`frontend/src/lib/document/README.md`](./frontend/src/lib/document/README.md), [`frontend/src/lib/shared/README.md`](./frontend/src/lib/shared/README.md)
|
||||
|
||||
### Styling Conventions (Tailwind CSS 4)
|
||||
|
||||
Brand color utilities (defined in `layout.css`):
|
||||
Brand color tokens (defined in `layout.css`):
|
||||
|
||||
| Class | Value | Usage |
|
||||
|---|---|---|
|
||||
| `brand-navy` | `#002850` | Primary text, buttons, headers |
|
||||
| `brand-mint` | `#A6DAD8` | Accents, hover underlines, icons |
|
||||
| `brand-sand` | `#E4E2D7` | Page background, card borders |
|
||||
| Token / Utility | CSS variable | Usage |
|
||||
| ---------------- | ---------------- | ------------------------------------------------------- |
|
||||
| `brand-navy` | `--palette-navy` | Tailwind utility — buttons, headers, primary text |
|
||||
| `brand-mint` | `--palette-mint` | Tailwind utility — accents, hover underlines, icons |
|
||||
| `--palette-sand` | `--palette-sand` | Palette constant only — use `bg-canvas` or `bg-surface` |
|
||||
|
||||
Typography:
|
||||
- `font-serif` (Merriweather) — body text, document titles, names
|
||||
|
||||
- `font-serif` (Tinos) — body text, document titles, names
|
||||
- `font-sans` (Montserrat) — labels, metadata, UI chrome
|
||||
|
||||
Card pattern for content sections:
|
||||
|
||||
```svelte
|
||||
<div class="bg-white shadow-sm border border-brand-sand rounded-sm p-6">
|
||||
<h2 class="text-xs font-bold uppercase tracking-widest text-gray-400 mb-5">Section Title</h2>
|
||||
<div class="rounded-sm border border-line bg-surface shadow-sm p-6">
|
||||
<h2 class="text-xs font-bold uppercase tracking-widest text-ink-3 mb-5">Section Title</h2>
|
||||
<!-- content -->
|
||||
</div>
|
||||
```
|
||||
|
||||
Save bar pattern — use **sticky full-bleed** for long forms (edit document), **card-style with `mt-4`** for short forms (new person):
|
||||
```svelte
|
||||
<!-- Long forms: sticky, full-bleed -->
|
||||
<div class="sticky bottom-0 z-10 -mx-4 px-6 py-4 bg-white border-t border-brand-sand shadow-[0_-2px_8px_rgba(0,0,0,0.06)] flex items-center justify-between">
|
||||
|
||||
<!-- Short forms: card, top margin -->
|
||||
<div class="mt-4 flex items-center justify-between rounded-sm border border-brand-sand bg-white px-6 py-4 shadow-sm">
|
||||
```
|
||||
|
||||
Back button pattern — use the shared `<BackButton>` component from `$lib/components/BackButton.svelte`:
|
||||
```svelte
|
||||
<script lang="ts">
|
||||
import BackButton from '$lib/components/BackButton.svelte';
|
||||
</script>
|
||||
|
||||
<BackButton />
|
||||
```
|
||||
The component calls `history.back()` so the user returns to wherever they came from. Label is always "Zurück" (no contextual suffix — destination is unknown). Touch target ≥ 44px and focus ring are built in. Do not use a static `<a href>` for back navigation.
|
||||
|
||||
Subtle action link (e.g. "new document/person"):
|
||||
```svelte
|
||||
<a href="/documents/new" class="inline-flex items-center gap-1 text-sm font-medium text-brand-navy/60 hover:text-brand-navy transition-colors">
|
||||
<svg class="w-4 h-4" ...><!-- plus icon --></svg>
|
||||
Neues Dokument
|
||||
</a>
|
||||
```
|
||||
Back button pattern — use the shared `<BackButton>` component from `$lib/shared/primitives/BackButton.svelte`. Do not use a static `<a href>` for back navigation.
|
||||
|
||||
### Error Handling (Frontend)
|
||||
|
||||
`src/lib/errors.ts` mirrors the backend `ErrorCode` enum and maps codes to Paraglide translation keys. When adding a new `ErrorCode` on the backend:
|
||||
1. Add it to `ErrorCode.java`
|
||||
2. Add it to the `ErrorCode` type in `errors.ts`
|
||||
3. Add a `case` in `getErrorMessage()`
|
||||
4. Add the translation key in `messages/de.json`, `en.json`, `es.json`
|
||||
→ See [CONTRIBUTING.md §Error handling](./CONTRIBUTING.md#error-handling)
|
||||
|
||||
**LLM reminder:** when adding a new `ErrorCode`: (1) add to `ErrorCode.java`, (2) add to `ErrorCode` type in `frontend/src/lib/shared/errors.ts`, (3) add a `case` in `getErrorMessage()`, (4) add i18n keys in `messages/{de,en,es}.json`.
|
||||
|
||||
---
|
||||
|
||||
## Infrastructure
|
||||
|
||||
The `docker-compose.yml` at the repo root orchestrates everything. A MinIO MC helper container runs at startup to create the `archive-documents` bucket. The backend container depends on both `db` and `minio` being healthy.
|
||||
|
||||
Database migrations live in `backend/src/main/resources/db/migration/` (Flyway, SQL files named `V{n}__{description}.sql`).
|
||||
→ See [docs/DEPLOYMENT.md](./docs/DEPLOYMENT.md)
|
||||
|
||||
## API Testing
|
||||
|
||||
@@ -362,4 +281,4 @@ HTTP test files are in `backend/api_tests/` for use with the VS Code REST Client
|
||||
|
||||
## Dev Container
|
||||
|
||||
A `.devcontainer/` config is available (Java 21 + Node 24, ports 8080 and 3000 forwarded). Use VS Code's "Reopen in Container" for a pre-configured environment.
|
||||
→ See [.devcontainer/README.md](./.devcontainer/README.md)
|
||||
|
||||
@@ -180,6 +180,8 @@ When in doubt, commit more often rather than less.
|
||||
|
||||
See [CODESTYLE.md](./CODESTYLE.md) for the full guide: Clean Code (Uncle Bob), DRY/KISS trade-offs, and SOLID principles applied to this stack.
|
||||
|
||||
For domain terminology (Person vs AppUser, DocumentStatus lifecycle, Chronik vs Aktivität, etc.) see [docs/GLOSSARY.md](./docs/GLOSSARY.md).
|
||||
|
||||
Quick reminders:
|
||||
- Pure functions over stateful helpers where possible
|
||||
- No premature abstractions — KISS beats DRY
|
||||
|
||||
305
CONTRIBUTING.md
Normal file
305
CONTRIBUTING.md
Normal file
@@ -0,0 +1,305 @@
|
||||
# Contributing to Familienarchiv
|
||||
|
||||
For the full collaboration rules (issue workflow, PR process, Red/Green TDD, commit conventions) see [COLLABORATING.md](./COLLABORATING.md).
|
||||
For coding style see [CODESTYLE.md](./CODESTYLE.md).
|
||||
For the system architecture see [docs/ARCHITECTURE.md](./docs/ARCHITECTURE.md) (introduced in DOC-2; until that PR merges, see [docs/architecture/c4-diagrams.md](./docs/architecture/c4-diagrams.md)).
|
||||
For domain terminology see [docs/GLOSSARY.md](./docs/GLOSSARY.md).
|
||||
|
||||
---
|
||||
|
||||
## 1. Environment setup
|
||||
|
||||
**Prerequisites:** Java 21 (SDKMAN), Node 24 (nvm), Docker
|
||||
|
||||
**Activate SDKMAN and nvm before running `java`, `mvn`, `node`, or `npm`:**
|
||||
|
||||
```bash
|
||||
source "$HOME/.sdkman/bin/sdkman-init.sh"
|
||||
export NVM_DIR="$HOME/.nvm" && [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. Daily development workflow
|
||||
|
||||
**Startup order — services must start in this sequence:**
|
||||
|
||||
```bash
|
||||
# 1. Start PostgreSQL and MinIO
|
||||
docker compose up -d db minio
|
||||
|
||||
# 2. Start the backend (separate terminal)
|
||||
cd backend && ./mvnw spring-boot:run
|
||||
|
||||
# 3. Start the frontend (separate terminal)
|
||||
cd frontend && npm install && npm run dev
|
||||
```
|
||||
|
||||
> `npm install` also wires up the Husky pre-commit hook via the `prepare` script.
|
||||
> Run it before your first commit, or the hook will fail to execute.
|
||||
|
||||
> **Do not use `docker-compose.ci.yml` locally** — it disables the bind mounts that the dev workflow depends on.
|
||||
|
||||
**Regenerate TypeScript types after any backend API change:**
|
||||
|
||||
```bash
|
||||
# Backend must be running with dev profile
|
||||
cd frontend && npm run generate:api
|
||||
```
|
||||
|
||||
> ⚠️ Forgetting this step is the most common cause of "where did my TypeScript type go?" — always regenerate after changing models or endpoints.
|
||||
|
||||
**Test commands:**
|
||||
|
||||
```bash
|
||||
cd backend && ./mvnw test # backend unit + slice tests
|
||||
cd frontend && npm run test # Vitest unit tests
|
||||
cd frontend && npm run check # svelte-check (type errors)
|
||||
cd frontend && npx playwright test # Playwright e2e tests
|
||||
```
|
||||
|
||||
**Branch naming:** `<type>/<issue-number>-<short-description>`, e.g. `feat/398-contributing`
|
||||
|
||||
**Commits:** one logical change per commit; reference the Gitea issue:
|
||||
|
||||
```
|
||||
feat(person): add aliases endpoint
|
||||
|
||||
Closes #42
|
||||
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
|
||||
```
|
||||
|
||||
### Test-type decision matrix
|
||||
|
||||
| What you're testing | Test type | Tool |
|
||||
|---|---|---|
|
||||
| Service business logic, calculations | Unit test | JUnit + `@ExtendWith(MockitoExtension.class)` |
|
||||
| HTTP contract, request validation, error codes | Controller slice test | `@WebMvcTest` |
|
||||
| Server `load` function | Vitest unit | Import directly, mock `fetch` |
|
||||
| Shared UI component | Vitest browser-mode | `render()` + `getByRole()` |
|
||||
| Full user-facing flow, navigation, forms | E2E | Playwright |
|
||||
|
||||
---
|
||||
|
||||
## 3. Walkthrough A — Add a new domain
|
||||
|
||||
**Example:** adding a `citation` domain (formal references to documents).
|
||||
|
||||
Both the backend and frontend are organised **domain-first**. A new domain means adding a package on both sides under the same name.
|
||||
|
||||
### Backend
|
||||
|
||||
1. Create `backend/src/main/java/org/raddatz/familienarchiv/citation/`
|
||||
|
||||
2. Add entity, repository, service, controller, and DTOs flat in the package:
|
||||
- **Entity** `Citation.java` — annotate with `@Entity @Data @Builder @NoArgsConstructor @AllArgsConstructor`; use `@GeneratedValue(strategy = GenerationType.UUID)` for the `id` field; add `@Schema(requiredMode = REQUIRED)` on every field the backend always populates
|
||||
- **Repository** `CitationRepository.java` — extends `JpaRepository<Citation, UUID>`
|
||||
- **Service** `CitationService.java` — `@Service @RequiredArgsConstructor`; write methods `@Transactional`, read methods unannotated; cross-domain data goes through the other domain's service, never its repository
|
||||
- **Controller** `CitationController.java` — `@RestController @RequestMapping("/api/citations")`
|
||||
|
||||
3. Add `@RequirePermission(Permission.WRITE_ALL)` on every `POST`, `PUT`, `PATCH`, and `DELETE` endpoint — **this is not optional**. Read-only `GET` endpoints stay unannotated.
|
||||
|
||||
4. Add a Flyway migration: `backend/src/main/resources/db/migration/V{n}__{description}.sql` (use the next sequential number after the highest existing one).
|
||||
|
||||
5. **Write failing tests before any implementation** (Red step):
|
||||
- Service unit test for business logic (`@ExtendWith(MockitoExtension.class)`)
|
||||
- `@WebMvcTest` slice test for each HTTP endpoint
|
||||
|
||||
6. Rebuild with `--spring.profiles.active=dev` and run `npm run generate:api` in `frontend/`.
|
||||
|
||||
### Frontend
|
||||
|
||||
7. Create `frontend/src/lib/citation/` — domain-specific Svelte components and TypeScript utilities go here.
|
||||
|
||||
8. Add routes under `frontend/src/routes/citations/` as needed.
|
||||
|
||||
9. Add a per-domain `README.md` in both the backend package folder and `frontend/src/lib/citation/` (per DOC-6).
|
||||
|
||||
### Documentation
|
||||
|
||||
10. Update `docs/ARCHITECTURE.md` Section 2 to include the new domain.
|
||||
11. Update `docs/GLOSSARY.md` if new terms are introduced.
|
||||
12. Update the ESLint boundary allow-list in `frontend/eslint.config.js` if the domain needs to import from another domain.
|
||||
|
||||
---
|
||||
|
||||
## 4. Walkthrough B — Add a new endpoint
|
||||
|
||||
**Example:** `POST /api/persons/{id}/aliases` — attach a name alias to an existing person.
|
||||
|
||||
### Red (write failing tests first)
|
||||
|
||||
1. Write a failing `@WebMvcTest` controller slice test:
|
||||
```java
|
||||
@Test
|
||||
void addAlias_returns201_whenAliasCreated() { ... }
|
||||
```
|
||||
|
||||
2. Write a failing service unit test:
|
||||
```java
|
||||
@Test
|
||||
void addAlias_throwsNotFound_whenPersonDoesNotExist() { ... }
|
||||
```
|
||||
|
||||
### Green (implement)
|
||||
|
||||
3. Add the service method in `PersonService.java`:
|
||||
```java
|
||||
@Transactional
|
||||
public PersonNameAlias addAlias(UUID personId, PersonNameAliasDTO dto) { ... }
|
||||
```
|
||||
|
||||
4. Add the controller method in `PersonController.java`:
|
||||
```java
|
||||
@PostMapping("/{id}/aliases")
|
||||
@RequirePermission(Permission.WRITE_ALL)
|
||||
public ResponseEntity<PersonNameAlias> addAlias(@PathVariable UUID id,
|
||||
@RequestBody PersonNameAliasDTO dto) { ... }
|
||||
```
|
||||
`@RequirePermission(Permission.WRITE_ALL)` on every state-mutating endpoint — **not optional**.
|
||||
|
||||
5. Validate user-supplied inputs at the controller boundary:
|
||||
```java
|
||||
if (dto.name() == null || dto.name().isBlank())
|
||||
throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "name is required");
|
||||
```
|
||||
Validate at system boundaries; trust internal service code.
|
||||
|
||||
6. Use `DomainException` for domain errors:
|
||||
```java
|
||||
DomainException.notFound(ErrorCode.PERSON_NOT_FOUND, "Person not found: " + id)
|
||||
```
|
||||
If you need a new error code, add it to `ErrorCode.java`, mirror it in
|
||||
`frontend/src/lib/shared/errors.ts`, and add translation keys in `messages/{de,en,es}.json`.
|
||||
|
||||
7. Mark every field the backend always populates with `@Schema(requiredMode = REQUIRED)` — this drives TypeScript type generation.
|
||||
|
||||
### Types and tests
|
||||
|
||||
8. Rebuild with `--spring.profiles.active=dev`, then `npm run generate:api` in `frontend/`.
|
||||
|
||||
> ⚠️ **Always regenerate types after any API change.** This is the #1 cause of "where did my TypeScript type go?"
|
||||
|
||||
9. Run the full test suite — all green before committing.
|
||||
|
||||
---
|
||||
|
||||
## 5. Walkthrough C — Add a new frontend page
|
||||
|
||||
**Example:** `/persons/[id]/timeline` — a chronological event timeline for one person.
|
||||
|
||||
### Red (write failing test first)
|
||||
|
||||
1. Write a failing Playwright E2E test for the user flow:
|
||||
```typescript
|
||||
test('timeline shows events in chronological order', async ({ page }) => {
|
||||
await page.goto('/persons/1/timeline');
|
||||
// assertions...
|
||||
});
|
||||
```
|
||||
|
||||
### Green (implement)
|
||||
|
||||
2. Create `frontend/src/routes/persons/[id]/timeline/+page.svelte`
|
||||
|
||||
3. Add `frontend/src/routes/persons/[id]/timeline/+page.server.ts` for the SSR load:
|
||||
```typescript
|
||||
import { createApiClient } from '$lib/shared/api.server';
|
||||
export const load: PageServerLoad = async ({ params, fetch }) => {
|
||||
const api = createApiClient(fetch);
|
||||
const result = await api.GET('/api/persons/{id}', { params: { path: { id: params.id } } });
|
||||
if (!result.response.ok) throw error(result.response.status, '...');
|
||||
return { person: result.data! };
|
||||
};
|
||||
```
|
||||
|
||||
4. Domain-specific components (e.g. `TimelineEntry.svelte`) → `frontend/src/lib/person/`
|
||||
|
||||
5. Shared primitives (e.g. a generic date-range display) → `frontend/src/lib/shared/primitives/`
|
||||
|
||||
6. UI patterns to follow:
|
||||
- Back navigation: `import BackButton from '$lib/shared/primitives/BackButton.svelte'`
|
||||
- Date display: always append `T12:00:00` — `new Intl.DateTimeFormat('de-DE', …).format(new Date(val + 'T12:00:00'))` — anchoring the time at midday prevents the date shifting by one day when converted between UTC and the local timezone
|
||||
- Brand colors: `brand-navy`, `brand-mint`, `brand-sand` (defined in `src/routes/layout.css`)
|
||||
- Accessibility: touch targets ≥ 44 px (`min-h-[44px]`); focus rings (`focus-visible:ring-2 focus-visible:ring-brand-navy`); `aria-label` on icon-only buttons; `aria-live="polite"` on dynamic status messages
|
||||
|
||||
7. Add Paraglide i18n keys in `messages/de.json`, `messages/en.json`, `messages/es.json`.
|
||||
|
||||
8. If adding a new error code: mirror in `frontend/src/lib/shared/errors.ts` and add translation keys.
|
||||
|
||||
9. Make all tests green before committing.
|
||||
|
||||
---
|
||||
|
||||
## 6. Conventions reference
|
||||
|
||||
### Error handling
|
||||
|
||||
| Scenario | Pattern |
|
||||
|---|---|
|
||||
| Domain entity not found | `DomainException.notFound(ErrorCode.X, "…")` |
|
||||
| Permission denied | `DomainException.forbidden("…")` |
|
||||
| Concurrent edit conflict | `DomainException.conflict(ErrorCode.X, "…")` |
|
||||
| Infrastructure failure | `DomainException.internal(ErrorCode.X, "…")` |
|
||||
| Simple controller validation | `throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "…")` |
|
||||
|
||||
New error code: `ErrorCode.java` → `frontend/src/lib/shared/errors.ts` → `messages/{de,en,es}.json`.
|
||||
|
||||
### DTOs
|
||||
|
||||
- Input DTOs live flat in the domain package (e.g. `PersonUpdateDTO.java`)
|
||||
- Responses are the entity itself — no separate response DTOs
|
||||
- `@Schema(requiredMode = REQUIRED)` on every field the backend always populates
|
||||
|
||||
### Frontend API client
|
||||
|
||||
```typescript
|
||||
const api = createApiClient(fetch); // from $lib/shared/api.server
|
||||
const result = await api.GET('/api/persons/{id}', { params: { path: { id } } });
|
||||
if (!result.response.ok) {
|
||||
const code = (result.error as unknown as { code?: string })?.code;
|
||||
throw error(result.response.status, getErrorMessage(code));
|
||||
}
|
||||
return { person: result.data! }; // non-null assertion is safe after the ok check
|
||||
```
|
||||
|
||||
For multipart/form-data (file uploads): bypass the typed client and use raw `fetch` — the client cannot handle it.
|
||||
|
||||
### Date handling
|
||||
|
||||
| Context | Pattern |
|
||||
|---|---|
|
||||
| Form display | German `dd.mm.yyyy` with auto-dot insertion via `handleDateInput()` |
|
||||
| Wire format | ISO 8601 via a hidden `<input type="hidden" name="documentDate" value={dateIso}>` |
|
||||
| Display | `new Intl.DateTimeFormat('de-DE', …).format(new Date(val + 'T12:00:00'))` |
|
||||
|
||||
### Security checklist (new endpoint)
|
||||
|
||||
- `@RequirePermission(Permission.WRITE_ALL)` on every `POST`, `PUT`, `PATCH`, `DELETE` — required, not optional
|
||||
- Validate all user-supplied inputs at the controller boundary before passing to the service
|
||||
- Parameterised queries only — never interpolate user input into JPQL/SQL strings
|
||||
- No raw user input in log messages — use `{}` placeholders: `log.warn("Not found: {}", id)`
|
||||
- Validate content-type and size on upload endpoints before reading the stream
|
||||
|
||||
### Accessibility baseline (new frontend page)
|
||||
|
||||
- Touch targets ≥ 44 px on all interactive elements (`min-h-[44px]`)
|
||||
- Focus rings on all focusable elements (`focus-visible:ring-2 focus-visible:ring-brand-navy`)
|
||||
- `aria-label` on every icon-only button
|
||||
- `aria-live="polite"` on dynamic status messages
|
||||
- Color is never the sole status indicator
|
||||
|
||||
Full WCAG 2.1 AA reference: [docs/STYLEGUIDE.md](./docs/STYLEGUIDE.md).
|
||||
|
||||
### Lint and format
|
||||
|
||||
```bash
|
||||
# Frontend
|
||||
cd frontend && npm run lint # Prettier + ESLint check
|
||||
cd frontend && npm run format # Auto-fix formatting
|
||||
cd frontend && npm run check # svelte-check (type errors)
|
||||
|
||||
# Backend — no standalone lint tool; compilation and test runs catch style issues
|
||||
cd backend && ./mvnw test # compile + test
|
||||
cd backend && ./mvnw clean package -DskipTests # compile-only check
|
||||
```
|
||||
93
README.md
Normal file
93
README.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Familienarchiv
|
||||
|
||||
Familienarchiv is a private web application for digitising, organising, and searching a family document collection — letters, postcards, and photographs from 1899 to 1950. Family members upload scans, transcribe handwritten text (Kurrent/Sütterlin), and read the archive from any device.
|
||||
|
||||
---
|
||||
|
||||
## Subsystems
|
||||
|
||||
- `frontend/` — SvelteKit 2 / Svelte 5 / TypeScript / Tailwind 4 web app (server-side rendered)
|
||||
- `backend/` — Spring Boot 4 (Java 21) REST API; handles documents, persons, search, and user management
|
||||
- `ocr-service/` — Python FastAPI microservice for OCR and handwritten text recognition (HTR); single-node by design — see [ADR-001](docs/adr/001-ocr-python-microservice.md). Not part of the default dev stack (see Quick start below)
|
||||
- `infra/` — Gitea Actions CI/CD config; future home for infrastructure-as-code
|
||||
- `scripts/` — operational and data-pipeline helpers (`reset-db.sh`, `clean-e2e-data.sh`, import scripts)
|
||||
|
||||
---
|
||||
|
||||
## Quick start
|
||||
|
||||
**Prerequisites:** Java 21, Node 24, Docker with the `docker compose` plugin (V2).
|
||||
|
||||
### 1. Configure environment
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# The defaults in .env.example work for local development without changes.
|
||||
```
|
||||
|
||||
### 2. Start infrastructure
|
||||
|
||||
```bash
|
||||
# Starts PostgreSQL, MinIO (object storage), and Mailpit (dev mail catcher)
|
||||
docker compose up -d db minio mailpit
|
||||
```
|
||||
|
||||
### 3. Start the backend
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
./mvnw spring-boot:run
|
||||
# Starts on http://localhost:8080
|
||||
# API docs (dev profile, auto-enabled): http://localhost:8080/v3/api-docs
|
||||
```
|
||||
|
||||
### 4. Start the frontend
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
# Starts on http://localhost:5173
|
||||
```
|
||||
|
||||
Open **http://localhost:5173** — you should see the Familienarchiv login screen.
|
||||
|
||||
Default development credentials:
|
||||
|
||||
```
|
||||
# local dev only — change before any network-exposed deployment
|
||||
Email: admin@familyarchive.local
|
||||
Password: admin123
|
||||
```
|
||||
|
||||
> **Development setup only.** The default `docker compose` config exposes the database port and uses root MinIO credentials. Do not connect this to a network without first reading `docs/DEPLOYMENT.md` _(coming: [DOC-5, #399](http://heim-nas:3005/marcel/familienarchiv/issues/399))_.
|
||||
|
||||
### Running the full stack via Docker (optional)
|
||||
|
||||
To run everything including the backend and frontend in containers:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Note: the OCR service (`ocr-service/`) builds its Docker image locally and downloads ~6 GB of ML models on first start. Expect 30–60 minutes on a first run. The rest of the stack starts independently; on memory-constrained machines, exclude OCR with `--scale ocr-service=0` (the OCR service itself needs ≥ 12 GB RAM to run).
|
||||
|
||||
---
|
||||
|
||||
## Where to go next
|
||||
|
||||
| Resource | Purpose |
|
||||
|---|---|
|
||||
| [docs/architecture/c4-diagrams.md](docs/architecture/c4-diagrams.md) | C4 container and component diagrams (current system view) |
|
||||
| [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) _(coming: [DOC-2, #396](http://heim-nas:3005/marcel/familienarchiv/issues/396))_ | Full architecture guide with domain list |
|
||||
| [docs/GLOSSARY.md](docs/GLOSSARY.md) | Overloaded terms: Person vs AppUser, Chronik vs Aktivität, etc. |
|
||||
| [CONTRIBUTING.md](CONTRIBUTING.md) _(coming: [DOC-4, #398](http://heim-nas:3005/marcel/familienarchiv/issues/398))_ | How to add a domain, endpoint, or SvelteKit route |
|
||||
| [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md) _(coming: [DOC-5, #399](http://heim-nas:3005/marcel/familienarchiv/issues/399))_ | Production deployment checklist and secrets guide |
|
||||
| [docs/adr/](docs/adr/) | Architecture Decision Records — the "why" behind key choices |
|
||||
| [Gitea issue tracker](http://heim-nas:3005/marcel/familienarchiv/issues) _(internal — home network only)_ | Bug reports, feature requests, and project planning |
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
Private project — all rights reserved. Not licensed for redistribution.
|
||||
@@ -11,7 +11,7 @@ Spring Boot 4.0 monolith serving the Familienarchiv REST API. Handles document m
|
||||
- **Server**: Jetty (not Tomcat — excluded in pom.xml)
|
||||
- **Data**: PostgreSQL 16, JPA/Hibernate, Spring Data JPA
|
||||
- **Migrations**: Flyway (SQL files in `src/main/resources/db/migration/`)
|
||||
- **Security**: Spring Security, Spring Session JDBC, JWT tokens
|
||||
- **Security**: Spring Security, Spring Session JDBC
|
||||
- **File Storage**: MinIO via AWS SDK v2 (S3-compatible)
|
||||
- **Spreadsheet Import**: Apache POI 5.5.0 (Excel/ODS)
|
||||
- **API Docs**: SpringDoc OpenAPI 3.x (`/v3/api-docs` — dev profile only)
|
||||
@@ -19,7 +19,7 @@ Spring Boot 4.0 monolith serving the Familienarchiv REST API. Handles document m
|
||||
|
||||
## Package Structure
|
||||
|
||||
Package-by-domain: each domain owns its controller, service, repository, entities, and DTOs.
|
||||
<!-- TODO: rewrite post-REFACTOR-1 — see Epic 4 -->
|
||||
|
||||
```
|
||||
src/main/java/org/raddatz/familienarchiv/
|
||||
@@ -43,31 +43,28 @@ src/main/java/org/raddatz/familienarchiv/
|
||||
└── user/ # User domain — AppUser, UserGroup, UserService, auth controllers
|
||||
```
|
||||
|
||||
## Layering Rules (Strict)
|
||||
For per-domain ownership and public surface, see each domain's `README.md`.
|
||||
|
||||
```
|
||||
Controller → Service → Repository → DB
|
||||
```
|
||||
## Layering Rules
|
||||
|
||||
- **Controllers never call repositories directly.**
|
||||
- **Services never reach into another domain's repository.** Call the other domain's service instead.
|
||||
- ✅ `DocumentService` → `PersonService.getById()` → `PersonRepository`
|
||||
- ❌ `DocumentService` → `PersonRepository` directly
|
||||
→ See [docs/ARCHITECTURE.md §Layering rule](../docs/ARCHITECTURE.md#layering-rule)
|
||||
|
||||
**LLM reminder:** controllers never call repositories directly; services never reach into another domain's repository — always call the other domain's service.
|
||||
|
||||
## Key Entities
|
||||
|
||||
| Entity | Table | Key Relationships |
|
||||
|---|---|---|
|
||||
| `Document` | `documents` | ManyToOne sender (Person), ManyToMany receivers (Person), ManyToMany tags (Tag) |
|
||||
| `Person` | `persons` | Referenced by documents as sender/receiver; name aliases table |
|
||||
| `Tag` | `tag` | ManyToMany with documents via `document_tags`; self-referencing parent for tree |
|
||||
| `AppUser` | `app_users` | ManyToMany groups (UserGroup) |
|
||||
| `UserGroup` | `user_groups` | Has a `Set<String> permissions` |
|
||||
| `TranscriptionBlock` | `transcription_blocks` | Per-document, per-page text blocks with polygons |
|
||||
| `DocumentAnnotation` | `document_annotations` | Free-form annotations on document pages |
|
||||
| `Comment` | `document_comments` | Threaded comments with mentions |
|
||||
| `Notification` | `notifications` | User notification feed |
|
||||
| `OcrJob` / `OcrJobDocument` | `ocr_jobs`, `ocr_job_documents` | Batch OCR job tracking |
|
||||
| Entity | Table | Key Relationships |
|
||||
| --------------------------- | ------------------------------- | ------------------------------------------------------------------------------- |
|
||||
| `Document` | `documents` | ManyToOne sender (Person), ManyToMany receivers (Person), ManyToMany tags (Tag) |
|
||||
| `Person` | `persons` | Referenced by documents as sender/receiver; name aliases table |
|
||||
| `Tag` | `tag` | ManyToMany with documents via `document_tags`; self-referencing parent for tree |
|
||||
| `AppUser` | `app_users` | ManyToMany groups (UserGroup) |
|
||||
| `UserGroup` | `user_groups` | Has a `Set<String> permissions` |
|
||||
| `TranscriptionBlock` | `transcription_blocks` | Per-document, per-page text blocks with polygons |
|
||||
| `DocumentAnnotation` | `document_annotations` | Free-form annotations on document pages |
|
||||
| `Comment` | `document_comments` | Threaded comments with mentions |
|
||||
| `Notification` | `notifications` | User notification feed |
|
||||
| `OcrJob` / `OcrJobDocument` | `ocr_jobs`, `ocr_job_documents` | Batch OCR job tracking |
|
||||
|
||||
**`DocumentStatus` lifecycle:** `PLACEHOLDER → UPLOADED → TRANSCRIBED → REVIEWED → ARCHIVED`
|
||||
|
||||
@@ -104,32 +101,15 @@ public class MyEntity {
|
||||
|
||||
## Error Handling
|
||||
|
||||
Use `DomainException` for all domain errors:
|
||||
→ See [CONTRIBUTING.md §Error handling](../CONTRIBUTING.md#error-handling)
|
||||
|
||||
```java
|
||||
DomainException.notFound(ErrorCode.DOCUMENT_NOT_FOUND, "...")
|
||||
DomainException.forbidden("...")
|
||||
DomainException.conflict(ErrorCode.IMPORT_ALREADY_RUNNING, "...")
|
||||
DomainException.internal(ErrorCode.FILE_UPLOAD_FAILED, "...")
|
||||
```
|
||||
|
||||
When adding a new `ErrorCode`:
|
||||
1. Add to `ErrorCode.java`
|
||||
2. Mirror in frontend `src/lib/errors.ts`
|
||||
3. Add Paraglide translation key in `messages/{de,en,es}.json`
|
||||
**LLM reminder:** use `DomainException.notFound/forbidden/conflict/internal()` — never throw raw exceptions from service methods. For simple controller validation (not domain logic), `ResponseStatusException` is acceptable: `throw new ResponseStatusException(HttpStatus.BAD_REQUEST, "…")`. When adding a new `ErrorCode`: add to `ErrorCode.java`, mirror in `frontend/src/lib/shared/errors.ts`, add i18n keys in `messages/{de,en,es}.json`.
|
||||
|
||||
## Security / Permissions
|
||||
|
||||
Use `@RequirePermission` on controller methods or classes:
|
||||
→ See [docs/ARCHITECTURE.md §Permission system](../docs/ARCHITECTURE.md#permission-system)
|
||||
|
||||
```java
|
||||
@RequirePermission(Permission.WRITE_ALL)
|
||||
public Document updateDocument(...) { ... }
|
||||
```
|
||||
|
||||
Available permissions: `READ_ALL`, `WRITE_ALL`, `ADMIN`, `ADMIN_USER`, `ADMIN_TAG`, `ADMIN_PERMISSION`
|
||||
|
||||
`PermissionAspect` checks the current user's `UserGroup.permissions` at runtime.
|
||||
**LLM reminder:** `@RequirePermission(Permission.WRITE_ALL)` is **required** on every `POST`, `PUT`, `PATCH`, `DELETE` endpoint — not optional. Do not mix with Spring Security's `@PreAuthorize`. Available permissions: `READ_ALL`, `WRITE_ALL`, `ADMIN`, `ADMIN_USER`, `ADMIN_TAG`, `ADMIN_PERMISSION`, `ANNOTATE_ALL`, `BLOG_WRITE`.
|
||||
|
||||
## OCR Integration
|
||||
|
||||
@@ -141,49 +121,35 @@ The backend orchestrates OCR by calling the Python `ocr-service` microservice vi
|
||||
- `OcrBatchService` — handles batch/job workflows
|
||||
- `OcrAsyncRunner` — async execution of OCR jobs
|
||||
|
||||
For ocr-service internals, see [`ocr-service/README.md`](../ocr-service/README.md).
|
||||
|
||||
## API Testing
|
||||
|
||||
HTTP test files in `backend/api_tests/` for the VS Code REST Client extension.
|
||||
|
||||
## How to Run
|
||||
|
||||
### Local Development
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
|
||||
# Run with dev profile (requires PostgreSQL + MinIO running via docker-compose)
|
||||
./mvnw spring-boot:run
|
||||
|
||||
# Build JAR (with tests)
|
||||
./mvnw clean package
|
||||
|
||||
# Build JAR skipping tests
|
||||
./mvnw spring-boot:run # Run with dev profile (requires PostgreSQL + MinIO)
|
||||
./mvnw clean package # Build JAR (with tests)
|
||||
./mvnw clean package -DskipTests
|
||||
|
||||
# Run all tests
|
||||
./mvnw test
|
||||
|
||||
# Run a single test class
|
||||
./mvnw test -Dtest=ClassName
|
||||
|
||||
# Run with coverage (JaCoCo)
|
||||
./mvnw clean verify
|
||||
./mvnw test # Run all tests
|
||||
./mvnw test -Dtest=ClassName # Run a single test class
|
||||
./mvnw clean verify # Run with JaCoCo coverage report
|
||||
```
|
||||
|
||||
### OpenAPI TypeScript Generation
|
||||
**OpenAPI / TypeScript type generation:**
|
||||
|
||||
1. Build and start backend with `--spring.profiles.active=dev`
|
||||
2. In `frontend/`, run: `npm run generate:api`
|
||||
1. Start backend with `--spring.profiles.active=dev`
|
||||
2. In `frontend/`: `npm run generate:api`
|
||||
|
||||
### Profiles
|
||||
|
||||
- **dev** (default): Enables OpenAPI, dev configs, e2e seeds
|
||||
- **prod**: Production profile — no dev endpoints
|
||||
**LLM reminder:** always regenerate types after any model or endpoint change — the most common cause of "where did my TypeScript type go?"
|
||||
|
||||
## Testing
|
||||
|
||||
- Unit tests: Mockito + JUnit, pure in-memory
|
||||
- Slice tests: `@WebMvcTest`, `@DataJpaTest` with Testcontainers PostgreSQL
|
||||
- Integration tests: Full Spring context with Testcontainers
|
||||
- Coverage gate: 88% branch coverage overall (JaCoCo)
|
||||
- Coverage gate: 88% branch coverage (JaCoCo)
|
||||
|
||||
@@ -190,6 +190,13 @@
|
||||
<artifactId>owasp-java-html-sanitizer</artifactId>
|
||||
<version>20240325.1</version>
|
||||
</dependency>
|
||||
|
||||
<!-- HTML → plain-text extraction for comment previews -->
|
||||
<dependency>
|
||||
<groupId>org.jsoup</groupId>
|
||||
<artifactId>jsoup</artifactId>
|
||||
<version>1.18.1</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
# audit
|
||||
|
||||
Append-only event store for all domain mutations. Every write across the application produces an `audit_log` row. The activity feed and Family Pulse dashboard aggregate from this table.
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Table: `audit_log` (append-only by convention — no UPDATE or DELETE in application code).
|
||||
Features: log mutations, query activity feed, query per-entity history.
|
||||
|
||||
**Admission criteria (why this is cross-cutting, not a Tier-1 domain):** consumed by 5+ domains; has no user-facing CRUD of its own; the data model is fixed (event log, not a business entity).
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
Nothing beyond the log table. `audit/` is an infrastructure layer, not a business domain.
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `logAfterCommit(event)` | document, person, user, ocr, geschichte | Record a mutation event after the DB transaction commits |
|
||||
|
||||
`logAfterCommit` is the only write-path. Query paths (`AuditLogQueryService`) are consumed by `dashboard/` and the activity feed route.
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `AuditService` — `logAfterCommit()` (write)
|
||||
- `AuditLogQueryService` — query by entity, by user, for the activity feed
|
||||
- `AuditLog` (entity) → table `audit_log`
|
||||
- `AuditLogRepository`
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
None. `audit/` is consumed by other domains; it does not call out to any of them.
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
No direct frontend counterpart. Audit data surfaces in the `activity/` and `conversation/` frontend domains via the dashboard API.
|
||||
@@ -29,5 +29,11 @@ public record ActivityFeedItemDTO(
|
||||
requiredMode = Schema.RequiredMode.NOT_REQUIRED,
|
||||
description = "Annotation associated with the comment; populated only for COMMENT_ADDED and MENTION_CREATED kinds."
|
||||
)
|
||||
UUID annotationId
|
||||
UUID annotationId,
|
||||
@Nullable
|
||||
@Schema(
|
||||
requiredMode = Schema.RequiredMode.NOT_REQUIRED,
|
||||
description = "Plain-text preview of the comment body (HTML stripped server-side, truncated to 120 chars); null for non-comment feed items or deleted comments."
|
||||
)
|
||||
String commentPreview
|
||||
) {}
|
||||
|
||||
@@ -12,6 +12,7 @@ import org.raddatz.familienarchiv.document.Document;
|
||||
import org.raddatz.familienarchiv.person.Person;
|
||||
import org.raddatz.familienarchiv.document.transcription.TranscriptionBlock;
|
||||
import org.raddatz.familienarchiv.document.comment.CommentService;
|
||||
import org.raddatz.familienarchiv.document.comment.CommentData;
|
||||
import org.raddatz.familienarchiv.document.DocumentService;
|
||||
import org.raddatz.familienarchiv.document.transcription.TranscriptionService;
|
||||
import org.raddatz.familienarchiv.user.UserService;
|
||||
@@ -133,9 +134,9 @@ public class DashboardService {
|
||||
.filter(Objects::nonNull)
|
||||
.distinct()
|
||||
.toList();
|
||||
Map<UUID, UUID> annotationByComment = commentIds.isEmpty()
|
||||
Map<UUID, CommentData> commentDataByComment = commentIds.isEmpty()
|
||||
? Map.of()
|
||||
: commentService.findAnnotationIdsByIds(commentIds);
|
||||
: commentService.findDataByIds(commentIds);
|
||||
|
||||
return rows.stream().map(row -> {
|
||||
ActivityActorDTO actor = row.getActorId() != null
|
||||
@@ -146,7 +147,10 @@ public class DashboardService {
|
||||
? row.getHappenedAtUntil().atOffset(ZoneOffset.UTC)
|
||||
: null;
|
||||
UUID commentId = row.getCommentId();
|
||||
UUID annotationId = commentId != null ? annotationByComment.get(commentId) : null;
|
||||
CommentData commentData = commentId != null ? commentDataByComment.get(commentId) : null;
|
||||
UUID annotationId = commentData != null ? commentData.annotationId() : null;
|
||||
String commentPreview = commentData != null && !commentData.preview().isBlank()
|
||||
? commentData.preview() : null;
|
||||
return new ActivityFeedItemDTO(
|
||||
org.raddatz.familienarchiv.audit.AuditKind.valueOf(row.getKind()),
|
||||
actor,
|
||||
@@ -158,7 +162,8 @@ public class DashboardService {
|
||||
row.getCount(),
|
||||
happenedAtUntil,
|
||||
commentId,
|
||||
annotationId
|
||||
annotationId,
|
||||
commentPreview
|
||||
);
|
||||
}).toList();
|
||||
}
|
||||
|
||||
@@ -0,0 +1,39 @@
|
||||
# dashboard
|
||||
|
||||
Stats aggregation for the admin dashboard and the Family Pulse widget. This is a derived domain — it has no tables of its own; all data is computed on-the-fly from Tier-1 domain data.
|
||||
|
||||
## What this domain owns
|
||||
|
||||
No entities. Routes: `/api/dashboard/*`, `/api/stats/*`.
|
||||
Features: document counts, person counts, publication stats, weekly activity data, incomplete-document list, enrichment queue, Family Pulse widget data, admin statistics.
|
||||
|
||||
**Admission criteria (cross-cutting):** aggregates from 3+ domains; no owned entities.
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
None of the underlying data — it reads from `document/`, `person/`, `audit/`, `notification/`, `geschichte/`.
|
||||
|
||||
## Public surface
|
||||
|
||||
`dashboard/` is a leaf domain — no other domain calls its services. It is the aggregator, not the aggregated.
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `StatsController` — REST under `/api/stats`
|
||||
- `DashboardController` — REST under `/api/dashboard`
|
||||
- `StatsService` — aggregated counts (documents, persons, geschichten, incomplete, etc.)
|
||||
- `DashboardService` — activity feed composition, Family Pulse data
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
- `DocumentService.count()` — total document count (StatsService)
|
||||
- `DocumentService.getDocumentById(UUID)` / `getDocumentsByIds(List<UUID>)` — document enrichment for activity feed (DashboardService)
|
||||
- `PersonService.count()` — total person count (StatsService)
|
||||
- `TranscriptionService.listBlocks(UUID)` — transcription block lookup for resume widget (DashboardService)
|
||||
- `UserService.getById(UUID)` — actor name resolution in activity feed (DashboardService)
|
||||
- `CommentService.findDataByIds(...)` — comment annotation/preview context lookup for activity feed (DashboardService)
|
||||
- `AuditLogQueryService.findMostRecentDocumentForUser()` / `getPulseStats()` / `findActivityFeed()` — audit-sourced feed rows (DashboardService)
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
Activity feed and Pulse widget are assembled in `frontend/src/lib/shared/dashboard/` and in the `aktivitaeten` route; no dedicated `dashboard/` lib folder.
|
||||
@@ -1,7 +1,12 @@
|
||||
package org.raddatz.familienarchiv.dashboard;
|
||||
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
|
||||
/**
|
||||
* Aggregate counts for the dashboard/persons stats bar.
|
||||
*/
|
||||
public record StatsDTO(long totalPersons, long totalDocuments) {
|
||||
public record StatsDTO(
|
||||
@Schema(requiredMode = Schema.RequiredMode.REQUIRED) long totalPersons,
|
||||
@Schema(requiredMode = Schema.RequiredMode.REQUIRED) long totalDocuments,
|
||||
@Schema(requiredMode = Schema.RequiredMode.REQUIRED) long totalStories) {
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package org.raddatz.familienarchiv.dashboard;
|
||||
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.raddatz.familienarchiv.document.DocumentService;
|
||||
import org.raddatz.familienarchiv.geschichte.GeschichteService;
|
||||
import org.raddatz.familienarchiv.person.PersonService;
|
||||
import org.raddatz.familienarchiv.dashboard.StatsDTO;
|
||||
import org.springframework.stereotype.Service;
|
||||
@@ -12,8 +13,9 @@ public class StatsService {
|
||||
|
||||
private final PersonService personService;
|
||||
private final DocumentService documentService;
|
||||
private final GeschichteService geschichteService;
|
||||
|
||||
public StatsDTO getStats() {
|
||||
return new StatsDTO(personService.count(), documentService.count());
|
||||
return new StatsDTO(personService.count(), documentService.count(), geschichteService.countPublished());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
package org.raddatz.familienarchiv.document;

import org.raddatz.familienarchiv.tag.TagOperator;

import java.util.List;
import java.util.UUID;

/**
 * The non-date filters honoured by {@link DocumentService#getDensity(DensityFilters)}.
 * Date bounds (from/to) are deliberately excluded — see the service Javadoc for why.
 *
 * <p>Modelled as a record so the seven values travel as one named bundle rather
 * than a positional argument list in which the two UUIDs (sender vs. receiver)
 * could silently be swapped at a call site.
 */
public record DensityFilters(
        String text,
        UUID sender,
        UUID receiver,
        List<String> tags,
        String tagQ,
        DocumentStatus status,
        TagOperator tagOperator) {}
|
||||
@@ -3,6 +3,7 @@ package org.raddatz.familienarchiv.document;
|
||||
import java.io.IOException;
|
||||
import java.time.LocalDate;
|
||||
import java.util.ArrayList;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
@@ -48,6 +49,7 @@ import org.raddatz.familienarchiv.filestorage.FileService;
|
||||
import org.raddatz.familienarchiv.user.UserService;
|
||||
import org.springframework.data.domain.Sort;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.http.CacheControl;
|
||||
import org.springframework.http.HttpHeaders;
|
||||
import org.springframework.http.MediaType;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
@@ -388,6 +390,23 @@ public class DocumentController {
|
||||
return ResponseEntity.ok(documentService.searchDocuments(q, from, to, senderId, receiverId, tags, tagQ, status, sort, dir, operator, pageable));
|
||||
}
|
||||
|
||||
@GetMapping(value = "/density", produces = MediaType.APPLICATION_JSON_VALUE)
|
||||
public ResponseEntity<DocumentDensityResult> density(
|
||||
@RequestParam(required = false) String q,
|
||||
@RequestParam(required = false) UUID senderId,
|
||||
@RequestParam(required = false) UUID receiverId,
|
||||
@RequestParam(required = false, name = "tag") List<String> tags,
|
||||
@RequestParam(required = false) String tagQ,
|
||||
@Parameter(description = "Filter by document status") @RequestParam(required = false) DocumentStatus status,
|
||||
@Parameter(description = "Tag operator: AND (default) or OR") @RequestParam(required = false) String tagOp) {
|
||||
TagOperator operator = "OR".equalsIgnoreCase(tagOp) ? TagOperator.OR : TagOperator.AND;
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters(q, senderId, receiverId, tags, tagQ, status, operator));
|
||||
return ResponseEntity.ok()
|
||||
.cacheControl(CacheControl.maxAge(5, TimeUnit.MINUTES).cachePrivate())
|
||||
.body(result);
|
||||
}
|
||||
|
||||
// --- TRAINING LABELS ---
|
||||
|
||||
public record TrainingLabelRequest(String label, boolean enrolled) {}
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
package org.raddatz.familienarchiv.document;
|
||||
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
|
||||
import java.time.LocalDate;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Result of the timeline density aggregation.
|
||||
*
|
||||
* <p>{@code minDate} / {@code maxDate} are intentionally not marked
|
||||
* {@code @Schema(requiredMode = REQUIRED)} — the empty-result case (no
|
||||
* documents match the filter) returns them as {@code null}, which surfaces in
|
||||
* the generated TypeScript as {@code minDate?: string | null}. Frontend code
|
||||
* must treat them as optional.
|
||||
*/
|
||||
public record DocumentDensityResult(
|
||||
@Schema(requiredMode = Schema.RequiredMode.REQUIRED)
|
||||
List<MonthBucket> buckets,
|
||||
LocalDate minDate,
|
||||
LocalDate maxDate
|
||||
) {
|
||||
/** The "no documents match the filter" result, with no buckets and null date bounds. */
|
||||
public static DocumentDensityResult empty() {
|
||||
return new DocumentDensityResult(List.of(), null, null);
|
||||
}
|
||||
}
|
||||
@@ -100,7 +100,45 @@ public interface DocumentRepository extends JpaRepository<Document, UUID>, JpaSp
|
||||
ORDER BY ts_rank(d.search_vector, q.pq) DESC,
|
||||
d.meta_date DESC NULLS LAST
|
||||
""")
|
||||
List<UUID> findRankedIdsByFts(@Param("query") String query);
|
||||
// Unpaged path — for bulk-edit "select all" and density chart
|
||||
List<UUID> findAllMatchingIdsByFts(@Param("query") String query);
|
||||
|
||||
/**
|
||||
* Returns one page of FTS-ranked document IDs with the total match count.
|
||||
*
|
||||
* <p>Each row contains (in column order):
|
||||
* <ol>
|
||||
* <li>UUID — document id</li>
|
||||
* <li>double — ts_rank score</li>
|
||||
* <li>long — COUNT(*) OVER () — full match count, not page count</li>
|
||||
* </ol>
|
||||
*
|
||||
* <p>Returns an empty list when the query matches no documents (including
|
||||
* stopword-only queries where websearch_to_tsquery returns an empty tsquery).
|
||||
* Use findAllMatchingIdsByFts for the unpaged bulk-edit path.
|
||||
*/
|
||||
@Query(nativeQuery = true, value = """
|
||||
WITH q AS (
|
||||
SELECT CASE WHEN websearch_to_tsquery('german', :query)::text <> ''
|
||||
THEN to_tsquery('simple', regexp_replace(
|
||||
websearch_to_tsquery('german', :query)::text,
|
||||
'''([^'']+)''',
|
||||
'''\\1'':*',
|
||||
'g'))
|
||||
END AS pq
|
||||
), matches AS (
|
||||
SELECT d.id, ts_rank(d.search_vector, q.pq) AS rank
|
||||
FROM documents d, q
|
||||
WHERE d.search_vector @@ q.pq
|
||||
)
|
||||
SELECT id, rank, COUNT(*) OVER () AS total
|
||||
FROM matches
|
||||
ORDER BY rank DESC, id
|
||||
OFFSET :offset LIMIT :limit
|
||||
""")
|
||||
List<Object[]> findFtsPageRaw(@Param("query") String query,
|
||||
@Param("offset") int offset,
|
||||
@Param("limit") int limit);
|
||||
|
||||
/**
|
||||
* Returns match-enrichment data for a set of documents identified by their IDs.
|
||||
|
||||
@@ -48,6 +48,7 @@ import java.io.IOException;
|
||||
import java.security.MessageDigest;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.time.LocalDate;
|
||||
import java.time.YearMonth;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
@@ -125,6 +126,74 @@ public class DocumentService {
|
||||
return titles;
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-month document counts for the timeline density widget (issue #385).
|
||||
*
|
||||
* <p>Filter-reactive: the chart recomputes when other filters (sender,
|
||||
* receiver, tag, q, status) change so it always matches the list it sits
|
||||
* above. Date bounds (`from`/`to`) are deliberately omitted — the chart is
|
||||
* the surface for picking those, so it must always span the broader space
|
||||
* the user is selecting within.
|
||||
*
|
||||
* <p>Implementation note: groups in memory rather than via SQL GROUP BY
|
||||
* because the existing {@link Specification} predicates compose easily
|
||||
* with {@code findAll(spec)} and the archive size (≈5k docs) keeps this
|
||||
* well under the 200ms p95 target. Cache-Control: max-age=300 on the
|
||||
* controller layer absorbs repeated browse loads.
|
||||
*
|
||||
* <p>Tracked in issue #481 for re-evaluation when {@code documents > 50k}
|
||||
* — at that scale move the aggregation into SQL (GROUP BY TO_CHAR(meta_date,
|
||||
* 'YYYY-MM')) and accept that the criteria/specification surface needs a
|
||||
* parallel native-query path.
|
||||
*/
|
||||
public DocumentDensityResult getDensity(DensityFilters filters) {
|
||||
List<UUID> ftsIds = resolveFtsIds(filters.text());
|
||||
if (ftsIds != null && ftsIds.isEmpty()) {
|
||||
return DocumentDensityResult.empty();
|
||||
}
|
||||
List<LocalDate> dates = loadFilteredDates(filters, ftsIds);
|
||||
return aggregateByMonth(dates);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the FTS-ranked document IDs when {@code text} is non-blank, or {@code null}
|
||||
* when no full-text query is active. An empty list means the FTS query ran but
|
||||
* matched zero documents — the caller short-circuits on that signal.
|
||||
*/
|
||||
private List<UUID> resolveFtsIds(String text) {
|
||||
if (!StringUtils.hasText(text)) return null;
|
||||
return documentRepository.findAllMatchingIdsByFts(text);
|
||||
}
|
||||
|
||||
/** Loads matching documents and projects to non-null {@link LocalDate}s. */
|
||||
private List<LocalDate> loadFilteredDates(DensityFilters filters, List<UUID> ftsIds) {
|
||||
boolean hasFts = ftsIds != null;
|
||||
Specification<Document> spec = buildSearchSpec(
|
||||
hasFts, ftsIds, null, null,
|
||||
filters.sender(), filters.receiver(),
|
||||
filters.tags(), filters.tagQ(),
|
||||
filters.status(), filters.tagOperator());
|
||||
return documentRepository.findAll(spec).stream()
|
||||
.map(Document::getDocumentDate)
|
||||
.filter(Objects::nonNull)
|
||||
.toList();
|
||||
}
|
||||
|
||||
/** Buckets {@code dates} into one {@link MonthBucket} per YYYY-MM and computes min/max. */
|
||||
private DocumentDensityResult aggregateByMonth(List<LocalDate> dates) {
|
||||
if (dates.isEmpty()) return DocumentDensityResult.empty();
|
||||
Map<String, Integer> counts = new java.util.TreeMap<>();
|
||||
for (LocalDate d : dates) {
|
||||
counts.merge(YearMonth.from(d).toString(), 1, Integer::sum);
|
||||
}
|
||||
List<MonthBucket> buckets = counts.entrySet().stream()
|
||||
.map(e -> new MonthBucket(e.getKey(), e.getValue()))
|
||||
.toList();
|
||||
LocalDate minDate = dates.stream().min(LocalDate::compareTo).orElse(null);
|
||||
LocalDate maxDate = dates.stream().max(LocalDate::compareTo).orElse(null);
|
||||
return new DocumentDensityResult(buckets, minDate, maxDate);
|
||||
}
|
||||
|
||||
/**
|
||||
* Lädt eine Datei hoch.
|
||||
* - Prüft, ob ein Eintrag (aus Excel) schon existiert.
|
||||
@@ -416,7 +485,7 @@ public class DocumentService {
|
||||
boolean hasText = StringUtils.hasText(text);
|
||||
List<UUID> rankedIds = null;
|
||||
if (hasText) {
|
||||
rankedIds = documentRepository.findRankedIdsByFts(text);
|
||||
rankedIds = documentRepository.findAllMatchingIdsByFts(text);
|
||||
if (rankedIds.isEmpty()) return List.of();
|
||||
}
|
||||
|
||||
@@ -576,39 +645,43 @@ public class DocumentService {
|
||||
// 1. Allgemeine Suche (für das Suchfeld im Frontend)
|
||||
public DocumentSearchResult searchDocuments(String text, LocalDate from, LocalDate to, UUID sender, UUID receiver, List<String> tags, String tagQ, DocumentStatus status, DocumentSort sort, String dir, TagOperator tagOperator, Pageable pageable) {
|
||||
boolean hasText = StringUtils.hasText(text);
|
||||
List<UUID> rankedIds = null;
|
||||
|
||||
// Pure-text RELEVANCE: push pagination into SQL — skip findAllMatchingIdsByFts entirely (ADR-008).
|
||||
if (isPureTextRelevance(hasText, sort, from, to, sender, receiver, tags, tagQ, status)) {
|
||||
return relevanceSortedPageFromSql(text, pageable);
|
||||
}
|
||||
|
||||
List<UUID> rankedIds = null;
|
||||
if (hasText) {
|
||||
rankedIds = documentRepository.findRankedIdsByFts(text);
|
||||
rankedIds = documentRepository.findAllMatchingIdsByFts(text);
|
||||
if (rankedIds.isEmpty()) return DocumentSearchResult.of(List.of());
|
||||
}
|
||||
|
||||
Specification<Document> spec = buildSearchSpec(
|
||||
hasText, rankedIds, from, to, sender, receiver, tags, tagQ, status, tagOperator);
|
||||
|
||||
// SENDER, RECEIVER and RELEVANCE sorts load the full match set and slice in memory.
|
||||
// SENDER and RECEIVER sorts load the full match set and slice in-memory.
|
||||
// JPA's Sort.by("sender.lastName") generates an INNER JOIN that silently drops
|
||||
// documents with null sender/receivers; RELEVANCE maps a DB order to an external
|
||||
// rank list. Cost scales linearly with match count — acceptable while documents
|
||||
// stays under ~10k rows. Past that, replace with SQL-level LEFT JOIN sort.
|
||||
// documents with null sender/receivers. Cost scales with match count —
|
||||
// acceptable while documents stays under ~10k rows. (ADR-008)
|
||||
if (sort == DocumentSort.RECEIVER) {
|
||||
// In-memory sort on page slice (≤ page size rows) — acceptable
|
||||
List<Document> sorted = sortByFirstReceiver(documentRepository.findAll(spec), dir);
|
||||
return buildResultPaged(pageSlice(sorted, pageable), text, pageable, sorted.size());
|
||||
}
|
||||
if (sort == DocumentSort.SENDER) {
|
||||
// In-memory sort on page slice (≤ page size rows) — acceptable
|
||||
List<Document> sorted = sortBySender(documentRepository.findAll(spec), dir);
|
||||
return buildResultPaged(pageSlice(sorted, pageable), text, pageable, sorted.size());
|
||||
}
|
||||
|
||||
// RELEVANCE: default when text present and no explicit sort given
|
||||
// RELEVANCE with active filters: load filtered subset and sort in-memory by rank.
|
||||
boolean useRankOrder = hasText && (sort == null || sort == DocumentSort.RELEVANCE);
|
||||
if (useRankOrder) {
|
||||
List<Document> results = documentRepository.findAll(spec);
|
||||
Map<UUID, Integer> rankMap = new HashMap<>();
|
||||
for (int i = 0; i < rankedIds.size(); i++) rankMap.put(rankedIds.get(i), i);
|
||||
List<Document> sorted = results.stream()
|
||||
.sorted(Comparator.comparingInt(
|
||||
doc -> rankMap.getOrDefault(doc.getId(), Integer.MAX_VALUE)))
|
||||
List<Document> sorted = documentRepository.findAll(spec).stream()
|
||||
.sorted(Comparator.comparingInt(doc -> rankMap.getOrDefault(doc.getId(), Integer.MAX_VALUE)))
|
||||
.toList();
|
||||
return buildResultPaged(pageSlice(sorted, pageable), text, pageable, sorted.size());
|
||||
}
|
||||
@@ -619,6 +692,39 @@ public class DocumentService {
|
||||
return buildResultPaged(page.getContent(), text, pageable, page.getTotalElements());
|
||||
}
|
||||
|
||||
private static boolean isPureTextRelevance(boolean hasText, DocumentSort sort,
|
||||
LocalDate from, LocalDate to, UUID sender, UUID receiver,
|
||||
List<String> tags, String tagQ, DocumentStatus status) {
|
||||
return hasText && (sort == null || sort == DocumentSort.RELEVANCE)
|
||||
&& from == null && to == null && sender == null && receiver == null
|
||||
&& (tags == null || tags.isEmpty()) && (tagQ == null || tagQ.isBlank()) && status == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pure-text RELEVANCE path — pagination and ts_rank ordering pushed into SQL.
|
||||
* Called when no non-text filters are active (ADR-008).
|
||||
*/
|
||||
private DocumentSearchResult relevanceSortedPageFromSql(String text, Pageable pageable) {
|
||||
long rawOffset = pageable.getOffset();
|
||||
if (rawOffset > Integer.MAX_VALUE) return DocumentSearchResult.of(List.of());
|
||||
int offset = (int) rawOffset;
|
||||
int limit = pageable.getPageSize();
|
||||
FtsPage ftsPage = toFtsPage(documentRepository.findFtsPageRaw(text, offset, limit));
|
||||
if (ftsPage.hits().isEmpty()) return DocumentSearchResult.of(List.of());
|
||||
|
||||
// Preserve ts_rank order from SQL across the JPA findAllById call.
|
||||
Map<UUID, Integer> rankMap = new HashMap<>();
|
||||
List<UUID> pageIds = new ArrayList<>();
|
||||
for (int i = 0; i < ftsPage.hits().size(); i++) {
|
||||
rankMap.put(ftsPage.hits().get(i).id(), i);
|
||||
pageIds.add(ftsPage.hits().get(i).id());
|
||||
}
|
||||
List<Document> docs = documentRepository.findAllById(pageIds).stream()
|
||||
.sorted(Comparator.comparingInt(d -> rankMap.getOrDefault(d.getId(), Integer.MAX_VALUE)))
|
||||
.toList();
|
||||
return buildResultPaged(docs, text, pageable, ftsPage.total());
|
||||
}
|
||||
|
||||
private static <T> List<T> pageSlice(List<T> sorted, Pageable pageable) {
|
||||
int from = Math.min((int) pageable.getOffset(), sorted.size());
|
||||
int to = Math.min(from + pageable.getPageSize(), sorted.size());
|
||||
@@ -658,6 +764,7 @@ public class DocumentService {
|
||||
return switch (sort) {
|
||||
case TITLE -> Sort.by(direction, "title");
|
||||
case UPLOAD_DATE -> Sort.by(direction, "createdAt");
|
||||
case UPDATED_AT -> Sort.by(direction, "updatedAt");
|
||||
default -> Sort.by(direction, "documentDate");
|
||||
};
|
||||
}
|
||||
@@ -943,6 +1050,28 @@ public class DocumentService {
|
||||
return result;
|
||||
}
|
||||
|
||||
private static final int COL_ID = 0;
|
||||
private static final int COL_RANK = 1;
|
||||
private static final int COL_TOTAL = 2;
|
||||
|
||||
/**
|
||||
* Maps raw Object[] rows from {@link DocumentRepository#findFtsPageRaw} to an
|
||||
* {@link FtsPage}. Uses pattern-matching UUID cast to guard against driver-level
|
||||
* type variance (some JDBC drivers return UUID as String).
|
||||
*/
|
||||
private static FtsPage toFtsPage(List<Object[]> rows) {
|
||||
if (rows.isEmpty()) return new FtsPage(List.of(), 0);
|
||||
long total = ((Number) rows.get(0)[COL_TOTAL]).longValue();
|
||||
List<FtsHit> hits = rows.stream()
|
||||
.map(r -> {
|
||||
UUID id = r[COL_ID] instanceof UUID u ? u : UUID.fromString(r[COL_ID].toString());
|
||||
double rank = ((Number) r[COL_RANK]).doubleValue();
|
||||
return new FtsHit(id, rank);
|
||||
})
|
||||
.toList();
|
||||
return new FtsPage(hits, total);
|
||||
}
|
||||
|
||||
/** Clean text + highlight offsets parsed from a {@code ts_headline} sentinel-delimited string. */
|
||||
public record ParsedHighlight(String cleanText, List<MatchOffset> offsets) {}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
package org.raddatz.familienarchiv.document;
|
||||
|
||||
public enum DocumentSort {
|
||||
DATE, TITLE, SENDER, RECEIVER, UPLOAD_DATE, RELEVANCE
|
||||
DATE, TITLE, SENDER, RECEIVER, UPLOAD_DATE, UPDATED_AT, RELEVANCE
|
||||
}
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
package org.raddatz.familienarchiv.document;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
/** A single document hit from a paginated FTS query — id and its ts_rank score. */
|
||||
record FtsHit(UUID id, double rank) {}
|
||||
@@ -0,0 +1,6 @@
|
||||
package org.raddatz.familienarchiv.document;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/** One page of FTS results — the ranked hit list for this page and the total match count. */
|
||||
record FtsPage(List<FtsHit> hits, long total) {}
|
||||
@@ -0,0 +1,10 @@
|
||||
package org.raddatz.familienarchiv.document;
|
||||
|
||||
import io.swagger.v3.oas.annotations.media.Schema;
|
||||
|
||||
public record MonthBucket(
|
||||
@Schema(requiredMode = Schema.RequiredMode.REQUIRED, example = "1915-08")
|
||||
String month,
|
||||
@Schema(requiredMode = Schema.RequiredMode.REQUIRED)
|
||||
int count
|
||||
) {}
|
||||
@@ -0,0 +1,50 @@
|
||||
# document
|
||||
|
||||
The archive's core concept. A `Document` represents one physical artefact (a letter, a postcard, a photo) stored in MinIO and described by metadata.
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entities: `Document`, `DocumentVersion`, `TranscriptionBlock`, `DocumentAnnotation`, `DocumentComment`.
|
||||
Features: document CRUD, file upload/download, full-text search, bulk editing, transcription workflows, annotation canvas, threaded comments, thumbnail generation (PDFBox).
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- `Person` (sender / receivers) — referenced by ID, resolved via `PersonService`
|
||||
- `Tag` — referenced by ID; the join is on the document side but tags are owned by `tag/`
|
||||
- `AppUser` — comments reference `AppUser` IDs, but user management lives in `user/`
|
||||
- OCR processing — `ocr/` orchestrates jobs; `ocr-service/` executes them
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `getDocumentById(UUID)` | ocr, notification | Fetch a single document |
|
||||
| `getDocumentsByIds(List<UUID>)` | ocr | Bulk fetch for OCR job |
|
||||
| `findByOriginalFilename(String)` | importing | Deduplication during mass import |
|
||||
| `deleteTagCascading(UUID tagId)` | tag | Remove a tag from all documents before deleting it |
|
||||
| `findWeeklyStats()` | dashboard | Activity data for Family Pulse widget |
|
||||
| `count()` | dashboard | Total document count for stats |
|
||||
| `addTrainingLabel(...)` | ocr | Attach a confirmed sender label to a document |
|
||||
| `findSegmentationQueue(int limit)` / `findTranscriptionQueue(int limit)` / `findReadyToReadQueue(int limit)` | ocr | OCR pipeline queues |
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `DocumentController` — REST under `/api/documents`
|
||||
- `DocumentService` — CRUD, search (JPA Specifications), bulk edit
|
||||
- `DocumentRepository` — includes bidirectional conversation-thread query
|
||||
- `DocumentSpecifications` — composable `Specification` predicates for search
|
||||
- `DocumentVersionService` / `DocumentVersionRepository` — append-only version history
|
||||
- `ThumbnailService` + `ThumbnailAsyncRunner` — PDFBox thumbnail generation (separate thread pool)
|
||||
- Sub-packages: `annotation/`, `comment/`, `transcription/`
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
- `PersonService.getById()` / `getAllById()` — resolve sender and receivers
|
||||
- `TagService.expandTagNamesToDescendantIdSets()` — tag filter expansion
|
||||
- `FileService.uploadFile()` / `downloadFile()` / `generatePresignedUrl()` — S3 I/O
|
||||
- `NotificationService.notifyMentions()` / `.notifyReply()` — comment mentions
|
||||
- `AuditService.logAfterCommit()` — every mutation is audited
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/document/README.md`
|
||||
@@ -27,7 +27,9 @@ public class CommentController {
|
||||
// ─── Block (transcription) comments ────────────────────────────────────────
|
||||
|
||||
@GetMapping("/api/documents/{documentId}/transcription-blocks/{blockId}/comments")
|
||||
public List<DocumentComment> getBlockComments(@PathVariable UUID blockId) {
|
||||
public List<DocumentComment> getBlockComments(
|
||||
@PathVariable UUID documentId,
|
||||
@PathVariable UUID blockId) {
|
||||
return commentService.getCommentsForBlock(blockId);
|
||||
}
|
||||
|
||||
@@ -48,6 +50,7 @@ public class CommentController {
|
||||
@RequirePermission({Permission.ANNOTATE_ALL, Permission.WRITE_ALL})
|
||||
public DocumentComment replyToBlockComment(
|
||||
@PathVariable UUID documentId,
|
||||
@PathVariable UUID blockId,
|
||||
@PathVariable UUID commentId,
|
||||
@RequestBody CreateCommentDTO dto,
|
||||
Authentication authentication) {
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
package org.raddatz.familienarchiv.document.comment;
|
||||
|
||||
import jakarta.annotation.Nullable;
|
||||
import java.util.UUID;
|
||||
|
||||
public record CommentData(@Nullable UUID annotationId, String preview) {}
|
||||
@@ -13,6 +13,7 @@ import org.raddatz.familienarchiv.document.comment.DocumentComment;
|
||||
import org.raddatz.familienarchiv.document.transcription.TranscriptionBlock;
|
||||
import org.raddatz.familienarchiv.document.comment.CommentRepository;
|
||||
import org.raddatz.familienarchiv.notification.NotificationService;
|
||||
import org.jsoup.Jsoup;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
|
||||
@@ -28,21 +29,29 @@ import java.util.UUID;
|
||||
@RequiredArgsConstructor
|
||||
public class CommentService {
|
||||
|
||||
private static final int PREVIEW_MAX_CHARS = 120;
|
||||
|
||||
private final CommentRepository commentRepository;
|
||||
private final UserService userService;
|
||||
private final NotificationService notificationService;
|
||||
private final AuditService auditService;
|
||||
private final TranscriptionService transcriptionService;
|
||||
|
||||
public Map<UUID, UUID> findAnnotationIdsByIds(Collection<UUID> commentIds) {
|
||||
public Map<UUID, CommentData> findDataByIds(Collection<UUID> commentIds) {
|
||||
if (commentIds == null || commentIds.isEmpty()) return Map.of();
|
||||
Map<UUID, UUID> result = new HashMap<>();
|
||||
Map<UUID, CommentData> result = new HashMap<>();
|
||||
for (DocumentComment c : commentRepository.findAllById(commentIds)) {
|
||||
if (c.getAnnotationId() != null) result.put(c.getId(), c.getAnnotationId());
|
||||
result.put(c.getId(), new CommentData(c.getAnnotationId(), stripAndTruncate(c.getContent())));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private String stripAndTruncate(String html) {
|
||||
if (html == null || html.isBlank()) return "";
|
||||
String text = Jsoup.parse(html).text().trim();
|
||||
return text.length() > PREVIEW_MAX_CHARS ? text.substring(0, PREVIEW_MAX_CHARS) : text;
|
||||
}
|
||||
|
||||
public List<DocumentComment> getCommentsForBlock(UUID blockId) {
|
||||
List<DocumentComment> roots = commentRepository.findByBlockIdAndParentIdIsNull(blockId);
|
||||
return withRepliesAndMentions(roots);
|
||||
|
||||
@@ -15,6 +15,7 @@ import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
|
||||
// "Handler" is Spring's @RestControllerAdvice naming convention — not a generic suffix.
|
||||
@RestControllerAdvice
|
||||
@Slf4j
|
||||
public class GlobalExceptionHandler {
|
||||
|
||||
@@ -56,6 +56,10 @@ public class GeschichteService {
|
||||
|
||||
// ─── Read API ────────────────────────────────────────────────────────────
|
||||
|
||||
public long countPublished() {
|
||||
return geschichteRepository.count(GeschichteSpecifications.hasStatus(GeschichteStatus.PUBLISHED));
|
||||
}
|
||||
|
||||
public Geschichte getById(UUID id) {
|
||||
Geschichte g = geschichteRepository.findById(id)
|
||||
.orElseThrow(() -> DomainException.notFound(
|
||||
@@ -77,8 +81,10 @@ public class GeschichteService {
|
||||
GeschichteStatus effective = currentUserHasBlogWrite() ? status : GeschichteStatus.PUBLISHED;
|
||||
int safeLimit = limit <= 0 ? DEFAULT_LIMIT : Math.min(limit, MAX_LIMIT);
|
||||
|
||||
UUID authorId = effective == GeschichteStatus.DRAFT ? currentUser().getId() : null;
|
||||
Specification<Geschichte> spec = Specification.allOf(
|
||||
GeschichteSpecifications.hasStatus(effective),
|
||||
GeschichteSpecifications.hasAuthor(authorId),
|
||||
GeschichteSpecifications.hasAllPersons(personIds),
|
||||
GeschichteSpecifications.hasDocument(documentId),
|
||||
GeschichteSpecifications.orderByDisplayDateDesc()
|
||||
|
||||
@@ -42,6 +42,12 @@ public final class GeschichteSpecifications {
|
||||
};
|
||||
}
|
||||
|
||||
// null authorId → no restriction (PUBLISHED path passes null; Spring Data skips null predicates)
|
||||
public static Specification<Geschichte> hasAuthor(UUID authorId) {
|
||||
return (root, query, cb) ->
|
||||
authorId == null ? null : cb.equal(root.get("author").get("id"), authorId);
|
||||
}
|
||||
|
||||
public static Specification<Geschichte> hasDocument(UUID documentId) {
|
||||
return (root, query, cb) -> {
|
||||
if (documentId == null) return null;
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
# geschichte
|
||||
|
||||
Family stories — curated narrative pieces that weave together persons, documents, and commentary into a publishable article. German: *Geschichte* (story / history).
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entity: `Geschichte`.
|
||||
Lifecycle: `DRAFT → PUBLISHED` (only published stories are visible to non-authors).
|
||||
Features: story CRUD, rich-text editing with person and document cross-references, publish/unpublish toggle, comment thread (shared component from `shared/discussion/`).
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- `Person` or `Document` records — stories reference them by ID. Deleting a Person or Document does not cascade to Geschichte.
|
||||
- Comment storage — shared comment infrastructure is in `document/comment/` (or `shared/discussion/` on the frontend).
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `getById(UUID)` | notification | Resolve story context in mention notifications |
|
||||
| `list(...)` | dashboard | Recent stories for the activity feed |
|
||||
| `count()` | dashboard | Published story count for stats |
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `GeschichteController` — REST under `/api/geschichten`
|
||||
- `GeschichteService` — CRUD, publish lifecycle
|
||||
- `GeschichteRepository` — list by status, author
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
- `PersonService.getById()` / `getAllById()` — resolve person references in story body
|
||||
- `DocumentService.getDocumentsByIds()` — resolve document references in story body
|
||||
- `AuditService.logAfterCommit()` — story mutations are audited
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/geschichte/README.md`
|
||||
@@ -0,0 +1,41 @@
|
||||
# notification
|
||||
|
||||
In-app messages delivered in real time via SSE and persisted in the bell-icon dropdown. Notifications are created by other domains in response to events (comment mentions, replies).
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entity: `Notification`.
|
||||
Features: create and deliver notifications, unread count, mark-read, SSE real-time push, per-user delivery preferences (stored as fields on `AppUser`, managed by `user/`).
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- `AppUser` (recipient) — owned by `user/`
|
||||
- `Document` or `Geschichte` (notification context) — referenced by ID only
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `notifyMentions(mentionedUserIds, comment)` | document (comment) | Push mention notifications when a comment contains @mentions |
|
||||
| `notifyReply(reply, participantIds)` | document (comment) | Push reply notification to all thread participants |
|
||||
| `countUnread(userId)` | user session | Unread badge count in the nav bar |
|
||||
| `getNotifications(userId)` | dashboard / activity | Notification list for bell dropdown |
|
||||
| `markRead(id)` / `markAllRead(userId)` | notification controller | User-driven read-state updates |
|
||||
| `updatePreferences(userId, dto)` | notification controller | Per-user delivery preferences |
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `NotificationController` — REST under `/api/notifications`
|
||||
- `NotificationService` — create, query, mark-read
|
||||
- `SseEmitterRegistry` — runtime-stateful component that keeps one `SseEmitter` per connected user. On `notifyMentions()` / `notifyReply()`, the service writes to `SseEmitterRegistry` to push real-time events. SSE connections go **backend → browser directly**, not via the SvelteKit SSR layer.
|
||||
- `NotificationRepository` — persisted notification rows
|
||||
- `NotificationPreferenceDTO` — read/write DTO for preference endpoints (prefs stored on `AppUser`)
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
**Outbound (this domain calls):**
|
||||
- `DocumentService.findTitlesByIds(List<UUID>)` — enriches notification DTOs with document titles for display in the bell dropdown
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/notification/README.md`
|
||||
@@ -0,0 +1,44 @@
|
||||
# ocr
|
||||
|
||||
OCR/HTR pipeline orchestration. This domain manages job lifecycle and result ingestion — it does **not** perform OCR. Actual text recognition runs in the Python `ocr-service/` container (port 8000, internal network only).
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entities: `OcrJob`, `OcrJobDocument`, `SenderModel`.
|
||||
Features: start OCR jobs, track job lifecycle (`PENDING → RUNNING → DONE / FAILED`), stream transcription blocks back into `document/transcription/`, sender-model training, segmentation training.
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- Document content — `Document` and `TranscriptionBlock` are owned by `document/`
|
||||
- File storage — presigned MinIO URLs are generated by `filestorage/FileService` and passed to the OCR service
|
||||
- OCR processing — the Python `ocr-service/` executes Surya (typewritten) and Kraken (Kurrent/Sütterlin HTR) and streams results back
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `startOcr(documentId, ...)` | document | Trigger an OCR job for a document |
|
||||
| `getJob(UUID)` | document | Fetch job status |
|
||||
| `getDocumentOcrStatus(UUID)` | document | Per-document OCR status summary |
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `OcrController` — REST under `/api/ocr`
|
||||
- `OcrService` — job creation, presigned URL generation, result ingestion
|
||||
- `OcrBatchService` — batch job workflows
|
||||
- `OcrAsyncRunner` — `@Async` execution of OCR jobs
|
||||
- `OcrTrainingService` — calls `/train` and `/segtrain` on the Python service (protected by `X-Training-Token` header)
|
||||
- `OcrJobRepository` / `OcrJobDocumentRepository`
|
||||
- `SenderModelRepository` — trained sender-recognition models
|
||||
- `OcrClient` (interface) / `RestClientOcrClient` — HTTP client for the Python OCR service; mockable for tests
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
- `DocumentService.getDocumentById()` / `getDocumentsByIds()` — resolve target documents
|
||||
- `DocumentService.addTrainingLabel()` — attach confirmed sender labels after training
|
||||
- `FileService.generatePresignedUrl()` — generate MinIO presigned URLs passed to the OCR service (PDF bytes never flow through the backend)
|
||||
- `AuditService.logAfterCommit()` — OCR job events are audited
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/ocr/README.md`
|
||||
@@ -35,7 +35,14 @@ public class PersonController {
|
||||
|
||||
@GetMapping
|
||||
@RequirePermission(Permission.READ_ALL)
|
||||
public ResponseEntity<List<PersonSummaryDTO>> getPersons(@RequestParam(required = false) String q) {
|
||||
public ResponseEntity<List<PersonSummaryDTO>> getPersons(
|
||||
@RequestParam(required = false) String q,
|
||||
@RequestParam(required = false, defaultValue = "0") int size,
|
||||
@RequestParam(required = false) String sort) {
|
||||
if ("documentCount".equals(sort) && size > 0 && q == null) {
|
||||
int safeSize = Math.min(size, 50);
|
||||
return ResponseEntity.ok(personService.findTopByDocumentCount(safeSize));
|
||||
}
|
||||
return ResponseEntity.ok(personService.findAll(q));
|
||||
}
|
||||
|
||||
|
||||
@@ -69,6 +69,22 @@ public interface PersonRepository extends JpaRepository<Person, UUID> {
|
||||
nativeQuery = true)
|
||||
List<PersonSummaryDTO> searchWithDocumentCount(@Param("query") String query);
|
||||
|
||||
// ORDER BY uses the computed alias "documentCount" — valid PostgreSQL (aliases allowed in ORDER BY,
|
||||
// unlike WHERE/HAVING). This is intentional; it would silently fail on MySQL or H2.
|
||||
@Query(value = """
|
||||
SELECT p.id, p.title, p.first_name AS firstName, p.last_name AS lastName,
|
||||
p.person_type AS personType,
|
||||
p.alias, p.birth_year AS birthYear, p.death_year AS deathYear, p.notes,
|
||||
p.family_member AS familyMember,
|
||||
(SELECT COUNT(*) FROM documents d WHERE d.sender_id = p.id)
|
||||
+ (SELECT COUNT(*) FROM document_receivers dr WHERE dr.person_id = p.id) AS documentCount
|
||||
FROM persons p
|
||||
ORDER BY documentCount DESC
|
||||
LIMIT :limit
|
||||
""",
|
||||
nativeQuery = true)
|
||||
List<PersonSummaryDTO> findTopByDocumentCount(@Param("limit") int limit);
|
||||
|
||||
// --- Correspondent queries ---
|
||||
|
||||
@Query(value = """
|
||||
|
||||
@@ -41,6 +41,10 @@ public class PersonService {
|
||||
return personRepository.searchWithDocumentCount(q.trim());
|
||||
}
|
||||
|
||||
public List<PersonSummaryDTO> findTopByDocumentCount(int limit) {
|
||||
return personRepository.findTopByDocumentCount(limit);
|
||||
}
|
||||
|
||||
public Person getById(UUID id) {
|
||||
return personRepository.findById(id)
|
||||
.orElseThrow(() -> DomainException.notFound(ErrorCode.PERSON_NOT_FOUND, "Person not found: " + id));
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
# person
|
||||
|
||||
Historical individuals referenced by documents. A `Person` is a family member who appears as a sender or receiver in the archive — they are never login accounts.
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entities: `Person`, `PersonNameAlias`, `PersonRelationship`.
|
||||
Features: person CRUD, name alias management, person merge (deduplication), family-member designation, relationship graph, person type classification (FAMILY, CORRESPONDENT, INSTITUTION).
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- `AppUser` — login accounts are in `user/`. A `Person` record has no login credentials. The separation is deliberate: a historical family member from 1905 is never a system user.
|
||||
- Document content — `Person` records are referenced by documents (as sender/receiver), not the other way around.
|
||||
- Relationship rendering — the Stammbaum view is derived by the frontend from `PersonRelationship` data.
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `getById(UUID)` | document, geschichte, ocr | Fetch one person by ID |
|
||||
| `getAllById(List<UUID>)` | document | Bulk fetch for sender/receiver resolution |
|
||||
| `findAll(String q)` | document, dashboard | List all persons |
|
||||
| `findByName(String firstName, String lastName)` | document | Typeahead search |
|
||||
| `findOrCreateByAlias(String rawName)` | importing | Idempotent create during mass import; type classification happens internally |
|
||||
| `findAllFamilyMembers()` | dashboard | Family member list for stats |
|
||||
| `findCorrespondents()` | document | Correspondent list for conversation filter |
|
||||
| `count()` | dashboard | Total person count for stats |
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `PersonController` — REST under `/api/persons`
|
||||
- `PersonService` — CRUD, merge, alias management, family-member designation
|
||||
- `PersonRepository` — sorted list, name search
|
||||
- `PersonNameAlias` / `PersonNameAliasRepository` — alternative name spellings
|
||||
- `PersonNameParser` / `PersonTypeClassifier` — name parsing utilities
|
||||
- `PersonSummaryDTO` — lightweight DTO for typeahead / list views
|
||||
- Sub-package: `relationship/` — `PersonRelationship`, `RelationshipService`, `RelationshipController`
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
- `AuditService.logAfterCommit()` — person mutations are audited
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/person/README.md`
|
||||
@@ -7,6 +7,7 @@ import org.springframework.security.core.Authentication;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
// Cross-cutting auth helper; no domain home — "Utils" is the correct suffix here.
|
||||
public final class SecurityUtils {
|
||||
|
||||
private SecurityUtils() {}
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
# tag
|
||||
|
||||
Hierarchical document categories. Tags form a tree via a self-referencing `parent_id` column and are applied to documents for filtering and browse navigation.
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entity: `Tag` (self-referencing `parent_id` tree).
|
||||
Features: tag CRUD, hierarchical deletion (cascade to descendants), tag typeahead, admin tag management (rename, reparent, merge).
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- Documents — the `document_tags` join table is on the document side. `Tag` does not hold document references.
|
||||
- Tag assignment — adding/removing a tag from a document is handled by `DocumentService`.
|
||||
|
||||
## Public surface (called from other domains)
|
||||
|
||||
| Method | Consumer | Purpose |
|
||||
|---|---|---|
|
||||
| `delete(UUID)` | document | Remove the tag record; called by `DocumentService.deleteTagCascading()` after all document references are unlinked |
|
||||
| `deleteWithDescendants(UUID)` | admin tag UI | Recursive subtree deletion |
|
||||
| `expandTagNamesToDescendantIdSets(List<String>)` | document | Expand tag filter to include descendant tags |
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `TagController` — REST under `/api/tags`
|
||||
- `TagService` — CRUD, hierarchy traversal, cascade-delete coordination
|
||||
- `TagRepository` — find-or-create by name (case-insensitive), subtree queries
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
None. Documents reference tags; tags do not reference documents or other domains.
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/tag/README.md`
|
||||
@@ -88,7 +88,8 @@ public class AppUser {
|
||||
};
|
||||
|
||||
public static String computeColor(UUID id) {
|
||||
return PALETTE[Math.abs(id.hashCode()) % PALETTE.length];
|
||||
// Math.floorMod avoids the Integer.MIN_VALUE overflow trap in Math.abs(hashCode())
|
||||
return PALETTE[Math.floorMod(id.hashCode(), PALETTE.length)];
|
||||
}
|
||||
|
||||
@PrePersist
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
# user
|
||||
|
||||
Login accounts and permission groups. An `AppUser` is a system user who can authenticate and act in the application — they are never a historical family member.
|
||||
|
||||
## What this domain owns
|
||||
|
||||
Entities: `AppUser`, `UserGroup`, password-reset tokens, invite tokens.
|
||||
Features: user CRUD, group CRUD, password change, password reset flow, invite links.
|
||||
|
||||
## What this domain does NOT own
|
||||
|
||||
- `Person` records — historical family members. An `AppUser` is never linked to a `Person`. This separation is intentional: a person who digitized letters in 2024 is not the same entity as their great-grandmother who wrote them in 1912. See `docs/GLOSSARY.md`.
|
||||
- Permission enforcement — `security/` owns `@RequirePermission` and `PermissionAspect`. `user/` only manages which permissions are stored on `UserGroup`.
|
||||
|
||||
## Public surface
|
||||
|
||||
`UserService` methods are consumed primarily by the security infrastructure and the admin UI. No other business-logic domain calls `UserService` directly.
|
||||
|
||||
The Spring Security chain (via `CustomUserDetailsService` in `security/`) calls `AppUserRepository.findByUsername()` on every authenticated request.
|
||||
|
||||
## Internal layout
|
||||
|
||||
- `UserController` — REST under `/api/users` (current user, CRUD)
|
||||
- `AuthController` — password reset, invite flow
|
||||
- `UserService` — BCrypt-encoded passwords, group assignment
|
||||
- `AppUserRepository` — find by username (used by Spring Security)
|
||||
- `UserGroupRepository` — group and permission management
|
||||
|
||||
## Cross-domain dependencies
|
||||
|
||||
- `AuditService.logAfterCommit()` — user-management mutations are audited
|
||||
|
||||
## Frontend counterpart
|
||||
|
||||
`frontend/src/lib/user/README.md`
|
||||
@@ -271,9 +271,10 @@ public class UserService {
|
||||
|
||||
@Transactional
|
||||
public UserGroup createGroup(GroupDTO dto) {
|
||||
UserGroup group = new UserGroup();
|
||||
group.setName(dto.getName());
|
||||
group.setPermissions(dto.getPermissions());
|
||||
UserGroup group = UserGroup.builder()
|
||||
.name(dto.getName())
|
||||
.permissions(dto.getPermissions() != null ? dto.getPermissions() : new HashSet<>())
|
||||
.build();
|
||||
return groupRepository.save(group);
|
||||
}
|
||||
|
||||
|
||||
@@ -38,6 +38,12 @@ spring:
|
||||
starttls:
|
||||
enable: true
|
||||
|
||||
server:
|
||||
# Behind Caddy/reverse proxy: trust X-Forwarded-{Proto,For,Host} so that
|
||||
# request.getScheme(), redirect URLs, and Spring Session "Secure" cookies
|
||||
# reflect the original https client request, not the http hop from Caddy.
|
||||
forward-headers-strategy: native
|
||||
|
||||
management:
|
||||
health:
|
||||
mail:
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
CREATE INDEX IF NOT EXISTS idx_documents_updated_at ON documents(updated_at DESC);
|
||||
@@ -0,0 +1,8 @@
|
||||
-- Speeds up "documents by sender" queries used on /persons/[id] Korrespondenz-Überblick (#306),
|
||||
-- /briefwechsel, and bulk-edit flows.
|
||||
CREATE INDEX IF NOT EXISTS idx_documents_sender_id
|
||||
ON documents(sender_id);
|
||||
|
||||
-- Speeds up "comments by author" queries on admin user detail and (future) contributor profile.
|
||||
CREATE INDEX IF NOT EXISTS idx_comments_author_id
|
||||
ON document_comments(author_id);
|
||||
@@ -0,0 +1,7 @@
|
||||
-- Remove duplicate (group_id, permission) rows that accumulated without a UNIQUE constraint.
|
||||
-- Keeps the row with the smallest ctid (earliest physical insertion order).
|
||||
DELETE FROM group_permissions a
|
||||
USING group_permissions b
|
||||
WHERE a.ctid < b.ctid
|
||||
AND a.group_id = b.group_id
|
||||
AND a.permission = b.permission;
|
||||
@@ -0,0 +1,11 @@
|
||||
-- Add NOT NULL and PRIMARY KEY to group_permissions.
|
||||
-- Requires V63 to have run first (no duplicates can remain).
|
||||
--
|
||||
-- After this migration, future seed migrations can use:
|
||||
-- INSERT INTO group_permissions ... ON CONFLICT DO NOTHING
|
||||
-- instead of the INSERT ... WHERE NOT EXISTS pattern used before V64.
|
||||
ALTER TABLE group_permissions
|
||||
ALTER COLUMN permission SET NOT NULL;
|
||||
|
||||
ALTER TABLE group_permissions
|
||||
ADD CONSTRAINT pk_group_permissions PRIMARY KEY (group_id, permission);
|
||||
@@ -0,0 +1,8 @@
|
||||
-- Promote the de-facto unique constraint on transcription_block_mentioned_persons to a named PK.
|
||||
-- uq_tbmp_block_person (added in V57) is backed by a B-tree index identical to a PK;
|
||||
-- this rename makes the naming convention explicit (pk_* vs uq_*).
|
||||
ALTER TABLE transcription_block_mentioned_persons
|
||||
DROP CONSTRAINT uq_tbmp_block_person;
|
||||
|
||||
ALTER TABLE transcription_block_mentioned_persons
|
||||
ADD CONSTRAINT pk_tbmp PRIMARY KEY (block_id, person_id);
|
||||
@@ -399,6 +399,86 @@ class MigrationIntegrationTest {
|
||||
AND dc.annotation_id IS NOT NULL
|
||||
""";
|
||||
|
||||
// ─── V62: indexes on FK columns ──────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void v62_idx_documents_sender_id_exists() {
|
||||
Integer count = jdbc.queryForObject(
|
||||
"SELECT COUNT(*) FROM pg_catalog.pg_indexes WHERE tablename = 'documents' AND indexname = 'idx_documents_sender_id'",
|
||||
Integer.class);
|
||||
assertThat(count).isEqualTo(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
void v62_idx_comments_author_id_exists() {
|
||||
Integer count = jdbc.queryForObject(
|
||||
"SELECT COUNT(*) FROM pg_catalog.pg_indexes WHERE tablename = 'document_comments' AND indexname = 'idx_comments_author_id'",
|
||||
Integer.class);
|
||||
assertThat(count).isEqualTo(1);
|
||||
}
|
||||
|
||||
// ─── V63+V64: group_permissions dedup + primary key ──────────────────────
|
||||
|
||||
@Test
|
||||
void v64_pk_group_permissions_exists() {
|
||||
Integer count = jdbc.queryForObject(
|
||||
"""
|
||||
SELECT COUNT(*) FROM pg_catalog.pg_constraint c
|
||||
JOIN pg_catalog.pg_class t ON c.conrelid = t.oid
|
||||
WHERE t.relname = 'group_permissions'
|
||||
AND c.conname = 'pk_group_permissions'
|
||||
AND c.contype = 'p'
|
||||
""",
|
||||
Integer.class);
|
||||
assertThat(count).isEqualTo(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
void v64_permission_column_isNotNullable() {
|
||||
Integer count = jdbc.queryForObject(
|
||||
"""
|
||||
SELECT COUNT(*) FROM information_schema.columns
|
||||
WHERE table_schema = 'public'
|
||||
AND table_name = 'group_permissions'
|
||||
AND column_name = 'permission'
|
||||
AND is_nullable = 'NO'
|
||||
""",
|
||||
Integer.class);
|
||||
assertThat(count).isEqualTo(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
@Transactional(propagation = Propagation.NOT_SUPPORTED)
|
||||
void v64_rejectsDuplicateGroupPermission() {
|
||||
UUID groupId = createUserGroup("DuplicateTestGroup-" + UUID.randomUUID());
|
||||
try {
|
||||
jdbc.update("INSERT INTO group_permissions (group_id, permission) VALUES (?, 'READ_ALL')", groupId);
|
||||
|
||||
assertThatThrownBy(() ->
|
||||
jdbc.update("INSERT INTO group_permissions (group_id, permission) VALUES (?, 'READ_ALL')", groupId)
|
||||
).isInstanceOf(DataIntegrityViolationException.class);
|
||||
} finally {
|
||||
jdbc.update("DELETE FROM group_permissions WHERE group_id = ?", groupId);
|
||||
jdbc.update("DELETE FROM user_groups WHERE id = ?", groupId);
|
||||
}
|
||||
}
|
||||
|
||||
// ─── V65: tbmp UNIQUE promoted to PRIMARY KEY ─────────────────────────────
|
||||
|
||||
@Test
|
||||
void v65_pk_tbmp_exists() {
|
||||
Integer count = jdbc.queryForObject(
|
||||
"""
|
||||
SELECT COUNT(*) FROM pg_catalog.pg_constraint c
|
||||
JOIN pg_catalog.pg_class t ON c.conrelid = t.oid
|
||||
WHERE t.relname = 'transcription_block_mentioned_persons'
|
||||
AND c.conname = 'pk_tbmp'
|
||||
AND c.contype = 'p'
|
||||
""",
|
||||
Integer.class);
|
||||
assertThat(count).isEqualTo(1);
|
||||
}
|
||||
|
||||
// ─── helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
private UUID createPerson(String firstName, String lastName) {
|
||||
@@ -482,4 +562,10 @@ class MigrationIntegrationTest {
|
||||
""", id, recipientId, docId, commentId);
|
||||
return id;
|
||||
}
|
||||
|
||||
private UUID createUserGroup(String name) {
|
||||
UUID id = UUID.randomUUID();
|
||||
jdbc.update("INSERT INTO user_groups (id, name) VALUES (?, ?)", id, name);
|
||||
return id;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
package org.raddatz.familienarchiv.config;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.springframework.beans.factory.config.YamlPropertiesFactoryBean;
|
||||
import org.springframework.boot.web.server.autoconfigure.ServerProperties.ForwardHeadersStrategy;
|
||||
import org.springframework.boot.context.properties.bind.Binder;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
|
||||
import org.springframework.core.env.PropertiesPropertySource;
|
||||
import org.springframework.core.io.ClassPathResource;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* Binds {@code server.forward-headers-strategy} from {@code application.yaml} into
|
||||
* Spring Boot's typed {@link ForwardHeadersStrategy} enum. The binder rejects any
|
||||
* value that is not a valid enum constant ({@code BindException}), so a typo
|
||||
* ({@code "nativ"}, {@code "Native"}, {@code "framework "}) or a future Spring
|
||||
* rename of the property fails the test, not silently degrades to {@code NONE}.
|
||||
*
|
||||
* <p>No Spring context, no embedded server, no Testcontainers — this is the
|
||||
* cheapest test that pins the contract "Caddy's X-Forwarded-Proto is trusted".
|
||||
*/
|
||||
class ForwardHeadersConfigurationTest {
|
||||
|
||||
@Test
|
||||
void forward_headers_strategy_binds_to_NATIVE() {
|
||||
YamlPropertiesFactoryBean yaml = new YamlPropertiesFactoryBean();
|
||||
yaml.setResources(new ClassPathResource("application.yaml"));
|
||||
Properties props = yaml.getObject();
|
||||
assertThat(props).as("application.yaml must be on the classpath").isNotNull();
|
||||
|
||||
Binder binder = new Binder(ConfigurationPropertySources.from(
|
||||
new PropertiesPropertySource("application", props)));
|
||||
|
||||
ForwardHeadersStrategy strategy = binder
|
||||
.bind("server.forward-headers-strategy", ForwardHeadersStrategy.class)
|
||||
.orElseThrow(() -> new AssertionError(
|
||||
"server.forward-headers-strategy is missing from application.yaml"));
|
||||
|
||||
assertThat(strategy)
|
||||
.as("Spring must trust X-Forwarded-Proto from Caddy so that "
|
||||
+ "request.getScheme(), redirect URLs, and the Spring Session "
|
||||
+ "'Secure' cookie reflect the original https client request.")
|
||||
.isEqualTo(ForwardHeadersStrategy.NATIVE);
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,7 @@ import org.raddatz.familienarchiv.user.AppUser;
|
||||
import org.raddatz.familienarchiv.document.Document;
|
||||
import org.raddatz.familienarchiv.document.transcription.TranscriptionBlock;
|
||||
import org.raddatz.familienarchiv.document.comment.CommentService;
|
||||
import org.raddatz.familienarchiv.document.comment.CommentData;
|
||||
import org.raddatz.familienarchiv.document.DocumentService;
|
||||
import org.raddatz.familienarchiv.document.transcription.TranscriptionService;
|
||||
import org.raddatz.familienarchiv.user.UserService;
|
||||
@@ -142,7 +143,8 @@ class DashboardServiceTest {
|
||||
when(documentService.getDocumentsByIds(List.of(docId))).thenReturn(List.of(
|
||||
Document.builder().id(docId).title("B").originalFilename("b.pdf").receivers(new HashSet<>()).build()
|
||||
));
|
||||
when(commentService.findAnnotationIdsByIds(List.of(commentId))).thenReturn(Map.of());
|
||||
when(commentService.findDataByIds(List.of(commentId)))
|
||||
.thenReturn(Map.of(commentId, new CommentData(null, "preview text")));
|
||||
|
||||
List<ActivityFeedItemDTO> items = dashboardService.getActivity(userId, 5, AuditKind.ROLLUP_ELIGIBLE);
|
||||
|
||||
@@ -162,8 +164,8 @@ class DashboardServiceTest {
|
||||
when(documentService.getDocumentsByIds(List.of(docId))).thenReturn(List.of(
|
||||
Document.builder().id(docId).title("B").originalFilename("b.pdf").receivers(new HashSet<>()).build()
|
||||
));
|
||||
when(commentService.findAnnotationIdsByIds(List.of(commentId)))
|
||||
.thenReturn(Map.of(commentId, annotationId));
|
||||
when(commentService.findDataByIds(List.of(commentId)))
|
||||
.thenReturn(Map.of(commentId, new CommentData(annotationId, "preview text")));
|
||||
|
||||
List<ActivityFeedItemDTO> items = dashboardService.getActivity(userId, 5, AuditKind.ROLLUP_ELIGIBLE);
|
||||
|
||||
@@ -187,7 +189,62 @@ class DashboardServiceTest {
|
||||
assertThat(items).hasSize(1);
|
||||
assertThat(items.get(0).commentId()).isNull();
|
||||
assertThat(items.get(0).annotationId()).isNull();
|
||||
verify(commentService, never()).findAnnotationIdsByIds(anyList());
|
||||
verify(commentService, never()).findDataByIds(anyList());
|
||||
}
|
||||
|
||||
// ─── getActivity commentPreview ───────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void getActivity_populates_commentPreview_for_COMMENT_ADDED_rows() {
|
||||
UUID userId = UUID.randomUUID();
|
||||
UUID docId = UUID.randomUUID();
|
||||
UUID commentId = UUID.randomUUID();
|
||||
|
||||
ActivityFeedRow row = mockFeedRow(docId, "COMMENT_ADDED", commentId);
|
||||
when(auditLogQueryService.findActivityFeed(userId, 5, AuditKind.ROLLUP_ELIGIBLE)).thenReturn(List.of(row));
|
||||
when(documentService.getDocumentsByIds(List.of(docId))).thenReturn(List.of(
|
||||
Document.builder().id(docId).title("B").originalFilename("b.pdf").receivers(new HashSet<>()).build()
|
||||
));
|
||||
when(commentService.findDataByIds(List.of(commentId)))
|
||||
.thenReturn(Map.of(commentId, new CommentData(null, "Hello family!")));
|
||||
|
||||
List<ActivityFeedItemDTO> items = dashboardService.getActivity(userId, 5, AuditKind.ROLLUP_ELIGIBLE);
|
||||
|
||||
assertThat(items.get(0).commentPreview()).isEqualTo("Hello family!");
|
||||
}
|
||||
|
||||
@Test
|
||||
void getActivity_leaves_commentPreview_null_for_TEXT_SAVED_rows() {
|
||||
UUID userId = UUID.randomUUID();
|
||||
UUID docId = UUID.randomUUID();
|
||||
|
||||
ActivityFeedRow row = mockFeedRow(docId, "TEXT_SAVED", null);
|
||||
when(auditLogQueryService.findActivityFeed(userId, 5, AuditKind.ROLLUP_ELIGIBLE)).thenReturn(List.of(row));
|
||||
when(documentService.getDocumentsByIds(List.of(docId))).thenReturn(List.of(
|
||||
Document.builder().id(docId).title("B").originalFilename("b.pdf").receivers(new HashSet<>()).build()
|
||||
));
|
||||
|
||||
List<ActivityFeedItemDTO> items = dashboardService.getActivity(userId, 5, AuditKind.ROLLUP_ELIGIBLE);
|
||||
|
||||
assertThat(items.get(0).commentPreview()).isNull();
|
||||
}
|
||||
|
||||
@Test
|
||||
void getActivity_leaves_commentPreview_null_when_comment_is_deleted() {
|
||||
UUID userId = UUID.randomUUID();
|
||||
UUID docId = UUID.randomUUID();
|
||||
UUID deletedCommentId = UUID.randomUUID();
|
||||
|
||||
ActivityFeedRow row = mockFeedRow(docId, "COMMENT_ADDED", deletedCommentId);
|
||||
when(auditLogQueryService.findActivityFeed(userId, 5, AuditKind.ROLLUP_ELIGIBLE)).thenReturn(List.of(row));
|
||||
when(documentService.getDocumentsByIds(List.of(docId))).thenReturn(List.of(
|
||||
Document.builder().id(docId).title("B").originalFilename("b.pdf").receivers(new HashSet<>()).build()
|
||||
));
|
||||
when(commentService.findDataByIds(List.of(deletedCommentId))).thenReturn(Map.of());
|
||||
|
||||
List<ActivityFeedItemDTO> items = dashboardService.getActivity(userId, 5, AuditKind.ROLLUP_ELIGIBLE);
|
||||
|
||||
assertThat(items.get(0).commentPreview()).isNull();
|
||||
}
|
||||
|
||||
// ─── getPulse — always uses full ROLLUP_ELIGIBLE set ─────────────────────
|
||||
|
||||
@@ -44,7 +44,7 @@ class StatsControllerTest {
|
||||
@Test
|
||||
@WithMockUser(authorities = "READ_ALL")
|
||||
void getStats_returns200_withCorrectCounts() throws Exception {
|
||||
when(statsService.getStats()).thenReturn(new StatsDTO(4L, 12L));
|
||||
when(statsService.getStats()).thenReturn(new StatsDTO(4L, 12L, 2L));
|
||||
|
||||
mockMvc.perform(get("/api/stats"))
|
||||
.andExpect(status().isOk())
|
||||
@@ -55,7 +55,7 @@ class StatsControllerTest {
|
||||
@Test
|
||||
@WithMockUser(authorities = "READ_ALL")
|
||||
void getStats_returns200_withZeroCounts() throws Exception {
|
||||
when(statsService.getStats()).thenReturn(new StatsDTO(0L, 0L));
|
||||
when(statsService.getStats()).thenReturn(new StatsDTO(0L, 0L, 0L));
|
||||
|
||||
mockMvc.perform(get("/api/stats"))
|
||||
.andExpect(status().isOk())
|
||||
|
||||
@@ -7,6 +7,7 @@ import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.raddatz.familienarchiv.document.DocumentService;
|
||||
import org.raddatz.familienarchiv.dashboard.StatsDTO;
|
||||
import org.raddatz.familienarchiv.geschichte.GeschichteService;
|
||||
import org.raddatz.familienarchiv.person.PersonService;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
@@ -17,6 +18,7 @@ class StatsServiceTest {
|
||||
|
||||
@Mock PersonService personService;
|
||||
@Mock DocumentService documentService;
|
||||
@Mock GeschichteService geschichteService;
|
||||
@InjectMocks StatsService statsService;
|
||||
|
||||
@Test
|
||||
@@ -30,6 +32,17 @@ class StatsServiceTest {
|
||||
assertThat(stats.totalDocuments()).isEqualTo(12L);
|
||||
}
|
||||
|
||||
@Test
|
||||
void getStats_includes_totalStories() {
|
||||
when(personService.count()).thenReturn(3L);
|
||||
when(documentService.count()).thenReturn(7L);
|
||||
when(geschichteService.countPublished()).thenReturn(5L);
|
||||
|
||||
StatsDTO stats = statsService.getStats();
|
||||
|
||||
assertThat(stats.totalStories()).isEqualTo(5L);
|
||||
}
|
||||
|
||||
@Test
|
||||
void getStats_returnsZero_whenNoEntities() {
|
||||
when(personService.count()).thenReturn(0L);
|
||||
|
||||
@@ -44,6 +44,7 @@ import static org.mockito.Mockito.when;
|
||||
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
|
||||
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.multipart;
|
||||
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
|
||||
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
|
||||
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.header;
|
||||
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
|
||||
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
|
||||
@@ -1240,4 +1241,100 @@ class DocumentControllerTest {
|
||||
.andExpect(jsonPath("$.errors[0].message").value(
|
||||
org.hamcrest.Matchers.containsString("not found")));
|
||||
}
|
||||
|
||||
// ─── GET /api/documents/density ───────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void density_returns401_whenUnauthenticated() throws Exception {
|
||||
mockMvc.perform(get("/api/documents/density"))
|
||||
.andExpect(status().isUnauthorized());
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser
|
||||
void density_returns200_withResultBody_whenAuthenticated() throws Exception {
|
||||
when(documentService.getDensity(any())).thenReturn(
|
||||
new DocumentDensityResult(
|
||||
List.of(new MonthBucket("1915-08", 2), new MonthBucket("1915-09", 1)),
|
||||
java.time.LocalDate.of(1915, 8, 3),
|
||||
java.time.LocalDate.of(1915, 9, 1)));
|
||||
|
||||
mockMvc.perform(get("/api/documents/density"))
|
||||
.andExpect(status().isOk())
|
||||
.andExpect(jsonPath("$.buckets").isArray())
|
||||
.andExpect(jsonPath("$.buckets[0].month").value("1915-08"))
|
||||
.andExpect(jsonPath("$.buckets[0].count").value(2))
|
||||
.andExpect(jsonPath("$.minDate").value("1915-08-03"))
|
||||
.andExpect(jsonPath("$.maxDate").value("1915-09-01"));
|
||||
}
|
||||
|
||||
// Pins produces=APPLICATION_JSON_VALUE on the density mapping so the OpenAPI/TypeScript
|
||||
// codegen records application/json instead of the wildcard. Without produces= the
|
||||
// request-mapping accepts any Accept header and the OpenAPI emit falls back to the
|
||||
// wildcard. Sending an Accept header that JSON cannot satisfy must NOT return 200 —
|
||||
// Spring rejects with 406 (HttpMediaTypeNotAcceptableException), which our
|
||||
// GlobalExceptionHandler may surface as 400. Either way it proves the route is
|
||||
// locked to JSON.
|
||||
@Test
|
||||
@WithMockUser
|
||||
void density_declaresApplicationJsonContentType() throws Exception {
|
||||
when(documentService.getDensity(any())).thenReturn(
|
||||
new DocumentDensityResult(List.of(), null, null));
|
||||
|
||||
mockMvc.perform(get("/api/documents/density")
|
||||
.accept(MediaType.APPLICATION_XML))
|
||||
.andExpect(status().is4xxClientError());
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser
|
||||
void density_emitsPrivateCacheControlHeader() throws Exception {
|
||||
when(documentService.getDensity(any())).thenReturn(
|
||||
new DocumentDensityResult(List.of(), null, null));
|
||||
|
||||
mockMvc.perform(get("/api/documents/density"))
|
||||
.andExpect(status().isOk())
|
||||
.andExpect(header().string("Cache-Control",
|
||||
org.hamcrest.Matchers.containsString("max-age=300")))
|
||||
.andExpect(header().string("Cache-Control",
|
||||
org.hamcrest.Matchers.containsString("private")));
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser
|
||||
void density_forwardsSenderAndTagFilters() throws Exception {
|
||||
when(documentService.getDensity(any())).thenReturn(
|
||||
new DocumentDensityResult(List.of(), null, null));
|
||||
UUID senderId = UUID.randomUUID();
|
||||
|
||||
mockMvc.perform(get("/api/documents/density")
|
||||
.param("senderId", senderId.toString())
|
||||
.param("tag", "Familie")
|
||||
.param("tag", "Urlaub")
|
||||
.param("tagOp", "OR"))
|
||||
.andExpect(status().isOk());
|
||||
|
||||
verify(documentService).getDensity(eq(new DensityFilters(
|
||||
null, senderId, null,
|
||||
List.of("Familie", "Urlaub"),
|
||||
null, null,
|
||||
org.raddatz.familienarchiv.tag.TagOperator.OR)));
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser
|
||||
void density_forwardsStatusAndQueryText() throws Exception {
|
||||
when(documentService.getDensity(any())).thenReturn(
|
||||
new DocumentDensityResult(List.of(), null, null));
|
||||
|
||||
mockMvc.perform(get("/api/documents/density")
|
||||
.param("q", "Brief")
|
||||
.param("status", "REVIEWED"))
|
||||
.andExpect(status().isOk());
|
||||
|
||||
verify(documentService).getDensity(eq(new DensityFilters(
|
||||
"Brief", null, null, null, null,
|
||||
DocumentStatus.REVIEWED,
|
||||
org.raddatz.familienarchiv.tag.TagOperator.AND)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,162 @@
|
||||
package org.raddatz.familienarchiv.document;
|
||||
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.raddatz.familienarchiv.PostgresContainerConfig;
|
||||
import org.raddatz.familienarchiv.person.Person;
|
||||
import org.raddatz.familienarchiv.person.PersonRepository;
|
||||
import org.raddatz.familienarchiv.tag.Tag;
|
||||
import org.raddatz.familienarchiv.tag.TagRepository;
|
||||
import org.raddatz.familienarchiv.tag.TagOperator;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.bean.override.mockito.MockitoBean;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
import software.amazon.awssdk.services.s3.S3Client;
|
||||
|
||||
import java.time.LocalDate;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* End-to-end test for the filter-reactive density aggregation.
|
||||
* Density bars must recompute as the user changes other filters (sender, tag,
|
||||
* status, …). The endpoint deliberately does NOT honour `from`/`to` — the chart
|
||||
* is the surface for picking those, so it must always span the broader space
|
||||
* the user is selecting within.
|
||||
*/
|
||||
@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
@ActiveProfiles("test")
|
||||
@Import(PostgresContainerConfig.class)
|
||||
@Transactional
|
||||
class DocumentDensityIntegrationTest {
|
||||
|
||||
@MockitoBean S3Client s3Client;
|
||||
@Autowired DocumentService documentService;
|
||||
@Autowired DocumentRepository documentRepository;
|
||||
@Autowired PersonRepository personRepository;
|
||||
@Autowired TagRepository tagRepository;
|
||||
|
||||
private Person hans;
|
||||
private Person anna;
|
||||
private Tag familieTag;
|
||||
private Tag urlaubTag;
|
||||
|
||||
@BeforeEach
|
||||
void seed() {
|
||||
hans = personRepository.save(Person.builder().firstName("Hans").lastName("Müller").build());
|
||||
anna = personRepository.save(Person.builder().firstName("Anna").lastName("Weber").build());
|
||||
familieTag = tagRepository.save(Tag.builder().name("Familie").build());
|
||||
urlaubTag = tagRepository.save(Tag.builder().name("Urlaub").build());
|
||||
}
|
||||
|
||||
private static DensityFilters noFilters() {
|
||||
return new DensityFilters(null, null, null, null, null, null, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_returnsAllMonths_whenNoFiltersApplied() {
|
||||
save("a", LocalDate.of(1915, 8, 3), null, Set.of());
|
||||
save("b", LocalDate.of(1915, 8, 17), null, Set.of());
|
||||
save("c", LocalDate.of(1915, 9, 1), null, Set.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(noFilters());
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::month)
|
||||
.containsExactly("1915-08", "1915-09");
|
||||
assertThat(result.buckets()).extracting(MonthBucket::count).containsExactly(2, 1);
|
||||
assertThat(result.minDate()).isEqualTo(LocalDate.of(1915, 8, 3));
|
||||
assertThat(result.maxDate()).isEqualTo(LocalDate.of(1915, 9, 1));
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_filtersBySender() {
|
||||
save("a", LocalDate.of(1915, 8, 3), hans, Set.of());
|
||||
save("b", LocalDate.of(1916, 1, 4), hans, Set.of());
|
||||
save("c", LocalDate.of(1920, 5, 1), anna, Set.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters(null, hans.getId(), null, null, null, null, null));
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::month)
|
||||
.containsExactly("1915-08", "1916-01");
|
||||
assertThat(result.maxDate()).isEqualTo(LocalDate.of(1916, 1, 4));
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_filtersByTag() {
|
||||
save("a", LocalDate.of(1915, 8, 3), null, Set.of(familieTag));
|
||||
save("b", LocalDate.of(1920, 5, 1), null, Set.of(urlaubTag));
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters(null, null, null, List.of("Familie"), null, null, TagOperator.AND));
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::month).containsExactly("1915-08");
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_combinesSenderAndTag() {
|
||||
save("a", LocalDate.of(1915, 8, 3), hans, Set.of(familieTag));
|
||||
save("b", LocalDate.of(1916, 1, 4), hans, Set.of(urlaubTag));
|
||||
save("c", LocalDate.of(1920, 5, 1), anna, Set.of(familieTag));
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters(null, hans.getId(), null, List.of("Familie"), null, null, TagOperator.AND));
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::month).containsExactly("1915-08");
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_filtersByStatus() {
|
||||
save("a", LocalDate.of(1915, 8, 3), null, Set.of(), DocumentStatus.UPLOADED);
|
||||
save("b", LocalDate.of(1916, 1, 4), null, Set.of(), DocumentStatus.PLACEHOLDER);
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters(null, null, null, null, null, DocumentStatus.UPLOADED, null));
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::month).containsExactly("1915-08");
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_returnsEmpty_whenNoDocumentsMatch() {
|
||||
save("a", LocalDate.of(1915, 8, 3), hans, Set.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters(null, anna.getId(), null, null, null, null, null));
|
||||
|
||||
assertThat(result.buckets()).isEmpty();
|
||||
assertThat(result.minDate()).isNull();
|
||||
assertThat(result.maxDate()).isNull();
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_excludesDocumentsWithNullDate() {
|
||||
save("dated", LocalDate.of(1915, 8, 3), null, Set.of());
|
||||
save("undated", null, null, Set.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(noFilters());
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::count).containsExactly(1);
|
||||
}
|
||||
|
||||
private void save(String suffix, LocalDate date, Person sender, Set<Tag> tags) {
|
||||
save(suffix, date, sender, tags, DocumentStatus.UPLOADED);
|
||||
}
|
||||
|
||||
private void save(String suffix, LocalDate date, Person sender, Set<Tag> tags, DocumentStatus status) {
|
||||
documentRepository.save(Document.builder()
|
||||
.title("Doc " + suffix)
|
||||
.originalFilename("doc-" + suffix + "-" + UUID.randomUUID() + ".pdf")
|
||||
.status(status)
|
||||
.documentDate(date)
|
||||
.sender(sender)
|
||||
.tags(new HashSet<>(tags))
|
||||
.build());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,109 @@
|
||||
package org.raddatz.familienarchiv.document;

import jakarta.persistence.EntityManager;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.raddatz.familienarchiv.PostgresContainerConfig;
import org.raddatz.familienarchiv.config.FlywayConfig;
import org.raddatz.familienarchiv.document.Document;
import org.raddatz.familienarchiv.document.DocumentRepository;
import org.raddatz.familienarchiv.document.DocumentStatus;
import org.springframework.beans.factory.annotation.Autowired;
// FIX: the previous import paths (org.springframework.boot.data.jpa.test.autoconfigure.*
// and org.springframework.boot.jdbc.test.autoconfigure.*) do not exist — the file
// could not compile. These are the actual Spring Boot packages.
import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureTestDatabase;
import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest;
import org.springframework.context.annotation.Import;
import org.springframework.test.annotation.DirtiesContext;

import java.util.List;
import java.util.UUID;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatNoException;

/**
 * Repository-level integration tests for {@code findFtsPageRaw}: verifies that the
 * paginated FTS query returns exactly page-size rows and that the window-function
 * total reflects the full match count, not just the page count.
 *
 * <p>Uses real Postgres via Testcontainers so the GIN index, tsvector trigger, and
 * {@code websearch_to_tsquery} semantics are identical to production.
 *
 * <p>{@code AFTER_CLASS} dirty-context keeps the Spring context alive for all tests
 * in this class and rebuilds it once at the end, rather than after every test.
 */
@DataJpaTest
@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE)
@Import({PostgresContainerConfig.class, FlywayConfig.class})
@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_CLASS)
class DocumentFtsPagedIntegrationTest {

    @Autowired DocumentRepository documentRepository;
    @Autowired EntityManager em;

    // 60 docs match "Walter"; 10 docs with "Hans" do not.
    private static final int WALTER_COUNT = 60;
    private static final int PAGE_SIZE = 50;

    /**
     * Seed 60 "Walter" docs and 10 "Hans" docs. saveAndFlush is used so the
     * tsvector trigger fires row-by-row before the query runs; em.clear() forces
     * the queries below to hit the database rather than the persistence context.
     */
    @BeforeEach
    void seed() {
        documentRepository.deleteAll();
        em.flush();
        for (int i = 0; i < WALTER_COUNT; i++) {
            documentRepository.saveAndFlush(doc("Brief von Walter Nr. " + i));
        }
        for (int i = 0; i < 10; i++) {
            documentRepository.saveAndFlush(doc("Brief von Hans Nr. " + i));
        }
        em.clear();
    }

    @Test
    void findFtsPageRaw_firstPage_returnsPageSizeRows() {
        List<Object[]> rows = documentRepository.findFtsPageRaw("Walter", 0, PAGE_SIZE);

        assertThat(rows).hasSize(PAGE_SIZE);
    }

    @Test
    void findFtsPageRaw_windowTotal_equalsFullMatchCount_notPageSize() {
        List<Object[]> rows = documentRepository.findFtsPageRaw("Walter", 0, PAGE_SIZE);

        // Column 2 carries COUNT(*) OVER () — the total across ALL matches.
        long total = ((Number) rows.get(0)[2]).longValue();
        assertThat(total).isEqualTo(WALTER_COUNT);
    }

    @Test
    void findFtsPageRaw_lastPage_returnsRemainder() {
        int remainder = WALTER_COUNT % PAGE_SIZE; // 60 % 50 = 10
        List<Object[]> rows = documentRepository.findFtsPageRaw("Walter", PAGE_SIZE, PAGE_SIZE);

        assertThat(rows).hasSize(remainder);
        // Total must still report the full match count even on a partial page.
        long total = ((Number) rows.get(0)[2]).longValue();
        assertThat(total).isEqualTo(WALTER_COUNT);
    }

    @Test
    void findFtsPageRaw_noMatches_returnsEmptyList() {
        List<Object[]> rows = documentRepository.findFtsPageRaw("XYZ_KEIN_TREFFER", 0, PAGE_SIZE);

        assertThat(rows).isEmpty();
    }

    @Test
    void findFtsPageRaw_stopwordOnlyQuery_returnsEmptyList_noException() {
        // German stopwords reduce to an empty tsquery; that must not blow up.
        assertThatNoException().isThrownBy(() -> {
            List<Object[]> rows = documentRepository.findFtsPageRaw("der die das und", 0, PAGE_SIZE);
            assertThat(rows).isEmpty();
        });
    }

    // ─── Helper ───────────────────────────────────────────────────────────────

    /** Minimal UPLOADED document whose title feeds the FTS index. */
    private Document doc(String title) {
        return Document.builder()
                .title(title)
                .originalFilename(title.replace(" ", "_") + ".pdf")
                .status(DocumentStatus.UPLOADED)
                .build();
    }
}
|
||||
@@ -69,7 +69,7 @@ class DocumentFtsTest {
|
||||
documentRepository.saveAndFlush(document("Alter Brief"));
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Brief");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Brief");
|
||||
|
||||
assertThat(ids).hasSize(1);
|
||||
}
|
||||
@@ -79,7 +79,7 @@ class DocumentFtsTest {
|
||||
documentRepository.saveAndFlush(document("Alter Brief"));
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Briefe");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Briefe");
|
||||
|
||||
assertThat(ids).hasSize(1);
|
||||
}
|
||||
@@ -89,7 +89,7 @@ class DocumentFtsTest {
|
||||
documentRepository.saveAndFlush(document("Ein furchtbarer Brief"));
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("furchtb");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("furchtb");
|
||||
|
||||
assertThat(ids).hasSize(1);
|
||||
}
|
||||
@@ -99,7 +99,7 @@ class DocumentFtsTest {
|
||||
documentRepository.saveAndFlush(document("Familienfoto"));
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Brief");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Brief");
|
||||
|
||||
assertThat(ids).isEmpty();
|
||||
}
|
||||
@@ -115,7 +115,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("schreiben");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("schreiben");
|
||||
|
||||
assertThat(ids).contains(doc.getId());
|
||||
}
|
||||
@@ -125,14 +125,14 @@ class DocumentFtsTest {
|
||||
Document doc = documentRepository.saveAndFlush(document("Leeres Dokument"));
|
||||
em.clear();
|
||||
|
||||
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).isEmpty();
|
||||
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).isEmpty();
|
||||
|
||||
UUID annotationId = annotation(doc.getId());
|
||||
blockRepository.saveAndFlush(block(doc.getId(), annotationId, "Grundbuch Eintrag 1923", 0));
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).contains(doc.getId());
|
||||
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).contains(doc.getId());
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -144,13 +144,13 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).contains(doc.getId());
|
||||
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).contains(doc.getId());
|
||||
|
||||
blockRepository.deleteById(block.getId());
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
assertThat(documentRepository.findRankedIdsByFts("Grundbuch")).doesNotContain(doc.getId());
|
||||
assertThat(documentRepository.findAllMatchingIdsByFts("Grundbuch")).doesNotContain(doc.getId());
|
||||
}
|
||||
|
||||
// ─── Ranking ───────────────────────────────────────────────────────────────
|
||||
@@ -166,7 +166,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Grundbuch");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Grundbuch");
|
||||
|
||||
assertThat(ids).hasSize(2);
|
||||
assertThat(ids.get(0)).isEqualTo(docA.getId());
|
||||
@@ -179,7 +179,7 @@ class DocumentFtsTest {
|
||||
documentRepository.saveAndFlush(document("Ein Brief von der Oma"));
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("der die das und");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("der die das und");
|
||||
|
||||
assertThat(ids).isEmpty();
|
||||
}
|
||||
@@ -195,7 +195,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Wille");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Wille");
|
||||
|
||||
assertThat(ids).contains(doc.getId());
|
||||
}
|
||||
@@ -205,7 +205,7 @@ class DocumentFtsTest {
|
||||
documentRepository.saveAndFlush(document("Brief"));
|
||||
em.clear();
|
||||
|
||||
assertThatNoException().isThrownBy(() -> documentRepository.findRankedIdsByFts("((("));
|
||||
assertThatNoException().isThrownBy(() -> documentRepository.findAllMatchingIdsByFts("((("));
|
||||
}
|
||||
|
||||
// ─── Weight C: sender/receiver names ───────────────────────────────────────
|
||||
@@ -223,7 +223,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Schmidt");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Schmidt");
|
||||
|
||||
assertThat(ids).contains(doc.getId());
|
||||
}
|
||||
@@ -241,7 +241,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Raddatz");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Raddatz");
|
||||
|
||||
assertThat(ids).contains(doc.getId());
|
||||
}
|
||||
@@ -260,7 +260,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> ids = documentRepository.findRankedIdsByFts("Familiengeschichte");
|
||||
List<UUID> ids = documentRepository.findAllMatchingIdsByFts("Familiengeschichte");
|
||||
|
||||
assertThat(ids).hasSize(1);
|
||||
}
|
||||
@@ -278,7 +278,7 @@ class DocumentFtsTest {
|
||||
em.flush();
|
||||
em.clear();
|
||||
|
||||
List<UUID> rankedIds = documentRepository.findRankedIdsByFts("Grundbuch");
|
||||
List<UUID> rankedIds = documentRepository.findAllMatchingIdsByFts("Grundbuch");
|
||||
Specification<Document> spec = Specification.where(hasIds(rankedIds))
|
||||
.and(hasStatus(DocumentStatus.UPLOADED));
|
||||
|
||||
|
||||
@@ -21,17 +21,22 @@ import org.springframework.data.domain.Pageable;
|
||||
import org.springframework.data.jpa.domain.Specification;
|
||||
|
||||
import java.time.LocalDate;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyInt;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
class DocumentServiceSortTest {
|
||||
|
||||
private static final Pageable UNPAGED = org.springframework.data.domain.PageRequest.of(0, 10_000);
|
||||
private static final Pageable PAGE = org.springframework.data.domain.PageRequest.of(0, 10_000);
|
||||
|
||||
@Mock DocumentRepository documentRepository;
|
||||
@Mock PersonService personService;
|
||||
@@ -43,12 +48,12 @@ class DocumentServiceSortTest {
|
||||
@Mock TranscriptionBlockQueryService transcriptionBlockQueryService;
|
||||
@InjectMocks DocumentService documentService;
|
||||
|
||||
// ─── searchDocuments — DATE sort ──────────────────────────────────────────
|
||||
// ─── DATE sort ────────────────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void searchDocuments_with_DATE_sort_and_text_sorts_chronologically_not_by_relevance() {
|
||||
UUID id1 = UUID.randomUUID(); // rank position 0 (higher relevance, older doc)
|
||||
UUID id2 = UUID.randomUUID(); // rank position 1 (lower relevance, newer doc)
|
||||
UUID id1 = UUID.randomUUID(); // higher relevance, older doc
|
||||
UUID id2 = UUID.randomUUID(); // lower relevance, newer doc
|
||||
|
||||
Document older = Document.builder().id(id1)
|
||||
.title("Brief").status(DocumentStatus.UPLOADED)
|
||||
@@ -57,38 +62,48 @@ class DocumentServiceSortTest {
|
||||
.title("Brief").status(DocumentStatus.UPLOADED)
|
||||
.documentDate(LocalDate.of(1960, 1, 1)).build();
|
||||
|
||||
// FTS returns id1 first (higher rank), id2 second
|
||||
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(id1, id2));
|
||||
// findAll(spec, pageable) — the correct date path — returns date-DESC order
|
||||
when(documentRepository.findAllMatchingIdsByFts("Brief")).thenReturn(List.of(id1, id2));
|
||||
when(documentRepository.findAll(any(Specification.class), any(Pageable.class)))
|
||||
.thenReturn(new PageImpl<>(List.of(newer, older)));
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
"Brief", null, null, null, null, null, null, null, DocumentSort.DATE, "DESC", null, UNPAGED);
|
||||
"Brief", null, null, null, null, null, null, null, DocumentSort.DATE, "DESC", null, PAGE);
|
||||
|
||||
// Expect: date order (newer 1960 first), NOT rank order (older 1940 first)
|
||||
assertThat(result.items()).hasSize(2);
|
||||
assertThat(result.items().get(0).document().getId()).isEqualTo(id2); // newer doc first
|
||||
assertThat(result.items().get(0).document().getId()).isEqualTo(id2); // newer first
|
||||
}
|
||||
|
||||
// ─── searchDocuments — RELEVANCE sort ─────────────────────────────────────
|
||||
// ─── RELEVANCE sort — pure text (no filters) ──────────────────────────────
|
||||
|
||||
@Test
|
||||
void searchDocuments_relevance_pureText_calls_findFtsPageRaw_not_findAllMatchingIds() {
|
||||
UUID id1 = UUID.randomUUID();
|
||||
List<Object[]> ftsRows = ftsRows(id1, 0.5d, 1L);
|
||||
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
|
||||
when(documentRepository.findAllById(any()))
|
||||
.thenReturn(List.of(doc(id1)));
|
||||
|
||||
documentService.searchDocuments(
|
||||
"Brief", null, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, PAGE);
|
||||
|
||||
verify(documentRepository).findFtsPageRaw(anyString(), anyInt(), anyInt());
|
||||
verify(documentRepository, never()).findAllMatchingIdsByFts(anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
void searchDocuments_with_RELEVANCE_sort_and_text_preserves_fts_rank_order() {
|
||||
UUID id1 = UUID.randomUUID(); // rank position 0
|
||||
UUID id2 = UUID.randomUUID(); // rank position 1
|
||||
UUID id1 = UUID.randomUUID(); // higher rank — must appear first
|
||||
UUID id2 = UUID.randomUUID(); // lower rank
|
||||
|
||||
Document doc1 = Document.builder().id(id1).title("Brief").status(DocumentStatus.UPLOADED).build();
|
||||
Document doc2 = Document.builder().id(id2).title("Brief").status(DocumentStatus.UPLOADED).build();
|
||||
|
||||
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(id1, id2));
|
||||
when(documentRepository.findAll(any(Specification.class)))
|
||||
.thenReturn(List.of(doc2, doc1)); // unordered from DB
|
||||
List<Object[]> ftsRows = new ArrayList<>();
|
||||
ftsRows.add(new Object[]{id1, 0.8d, 2L});
|
||||
ftsRows.add(new Object[]{id2, 0.3d, 2L});
|
||||
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
|
||||
when(documentRepository.findAllById(any())).thenReturn(List.of(doc(id2), doc(id1))); // unordered from JPA
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
"Brief", null, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, UNPAGED);
|
||||
"Brief", null, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, PAGE);
|
||||
|
||||
// Expect: rank order restored (id1 first)
|
||||
assertThat(result.items().get(0).document().getId()).isEqualTo(id1);
|
||||
}
|
||||
|
||||
@@ -97,16 +112,82 @@ class DocumentServiceSortTest {
|
||||
UUID id1 = UUID.randomUUID();
|
||||
UUID id2 = UUID.randomUUID();
|
||||
|
||||
Document doc1 = Document.builder().id(id1).title("Brief").status(DocumentStatus.UPLOADED).build();
|
||||
Document doc2 = Document.builder().id(id2).title("Brief").status(DocumentStatus.UPLOADED).build();
|
||||
|
||||
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(id1, id2));
|
||||
when(documentRepository.findAll(any(Specification.class)))
|
||||
.thenReturn(List.of(doc2, doc1));
|
||||
List<Object[]> ftsRows = new ArrayList<>();
|
||||
ftsRows.add(new Object[]{id1, 0.8d, 2L});
|
||||
ftsRows.add(new Object[]{id2, 0.3d, 2L});
|
||||
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
|
||||
when(documentRepository.findAllById(any())).thenReturn(List.of(doc(id2), doc(id1)));
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
"Brief", null, null, null, null, null, null, null, null, null, null, UNPAGED);
|
||||
"Brief", null, null, null, null, null, null, null, null, null, null, PAGE);
|
||||
|
||||
assertThat(result.items().get(0).document().getId()).isEqualTo(id1);
|
||||
}
|
||||
|
||||
// ─── RELEVANCE sort — overflow guard ─────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void searchDocuments_relevance_returns_empty_when_offset_exceeds_maxInt() {
|
||||
// offset = pageNumber * pageSize; choose values so offset > Integer.MAX_VALUE
|
||||
Pageable hugePage = org.springframework.data.domain.PageRequest.of(Integer.MAX_VALUE / 10 + 1, 10);
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
"Brief", null, null, null, null, null, null, null,
|
||||
DocumentSort.RELEVANCE, null, null, hugePage);
|
||||
|
||||
assertThat(result.items()).isEmpty();
|
||||
verify(documentRepository, never()).findFtsPageRaw(anyString(), anyInt(), anyInt());
|
||||
}
|
||||
|
||||
// ─── toFtsPage — UUID-as-String JDBC driver variance ────────────────────
|
||||
|
||||
@Test
|
||||
void searchDocuments_relevance_handles_string_uuid_from_jdbc_driver() {
|
||||
String stringId = "11111111-1111-1111-1111-111111111111";
|
||||
UUID uuidId = UUID.fromString(stringId);
|
||||
// Simulate a JDBC driver that returns the id column as String instead of UUID
|
||||
List<Object[]> ftsRows = new ArrayList<>();
|
||||
ftsRows.add(new Object[]{stringId, 0.5d, 1L});
|
||||
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
|
||||
when(documentRepository.findAllById(any())).thenReturn(List.of(doc(uuidId)));
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
"Brief", null, null, null, null, null, null, null,
|
||||
DocumentSort.RELEVANCE, null, null, PAGE);
|
||||
|
||||
assertThat(result.items()).hasSize(1);
|
||||
assertThat(result.items().get(0).document().getId()).isEqualTo(uuidId);
|
||||
}
|
||||
|
||||
// ─── RELEVANCE sort — text + active filter ────────────────────────────────
|
||||
|
||||
@Test
|
||||
void searchDocuments_relevance_with_active_filter_uses_inMemory_path() {
|
||||
UUID id1 = UUID.randomUUID();
|
||||
UUID id2 = UUID.randomUUID();
|
||||
|
||||
when(documentRepository.findAllMatchingIdsByFts("Brief")).thenReturn(List.of(id1, id2));
|
||||
when(documentRepository.findAll(any(Specification.class)))
|
||||
.thenReturn(List.of(doc(id2), doc(id1)));
|
||||
|
||||
// sender filter is active → triggers in-memory path, not findFtsPageRaw
|
||||
LocalDate from = LocalDate.of(1900, 1, 1);
|
||||
documentService.searchDocuments(
|
||||
"Brief", from, null, null, null, null, null, null, DocumentSort.RELEVANCE, null, null, PAGE);
|
||||
|
||||
verify(documentRepository, never()).findFtsPageRaw(anyString(), anyInt(), anyInt());
|
||||
verify(documentRepository).findAllMatchingIdsByFts("Brief");
|
||||
}
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────
|
||||
|
||||
private static Document doc(UUID id) {
|
||||
return Document.builder().id(id).title("Brief").status(DocumentStatus.UPLOADED).build();
|
||||
}
|
||||
|
||||
private static List<Object[]> ftsRows(UUID id, double rank, long total) {
|
||||
List<Object[]> rows = new ArrayList<>();
|
||||
rows.add(new Object[]{id, rank, total});
|
||||
return rows;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ import org.springframework.data.domain.PageImpl;
|
||||
import org.springframework.data.domain.PageRequest;
|
||||
import org.springframework.data.domain.Pageable;
|
||||
import org.springframework.data.domain.Sort;
|
||||
import org.springframework.data.jpa.domain.Specification;
|
||||
import org.springframework.mock.web.MockMultipartFile;
|
||||
|
||||
import java.time.LocalDate;
|
||||
@@ -1402,6 +1403,21 @@ class DocumentServiceTest {
|
||||
assertThat(result.items()).hasSize(1); // only the slice is enriched
|
||||
}
|
||||
|
||||
@Test
|
||||
void searchDocuments_UPDATED_AT_sort_resolves_to_updatedAt_field() {
|
||||
ArgumentCaptor<Pageable> captor = ArgumentCaptor.forClass(Pageable.class);
|
||||
when(documentRepository.findAll(any(org.springframework.data.jpa.domain.Specification.class), any(Pageable.class)))
|
||||
.thenReturn(new PageImpl<>(List.of()));
|
||||
|
||||
documentService.searchDocuments(null, null, null, null, null, null, null, null,
|
||||
DocumentSort.UPDATED_AT, "DESC", null,
|
||||
org.springframework.data.domain.PageRequest.of(0, 5));
|
||||
|
||||
verify(documentRepository).findAll(any(org.springframework.data.jpa.domain.Specification.class), captor.capture());
|
||||
assertThat(captor.getValue().getSort())
|
||||
.isEqualTo(Sort.by(Sort.Direction.DESC, "updatedAt"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void searchDocuments_senderSort_slicesInMemoryAndReportsFullTotal() {
|
||||
// Fixture: 120 docs with senders; request page 1, size 50 → expect 50 items
|
||||
@@ -1604,9 +1620,10 @@ class DocumentServiceTest {
|
||||
// chr(1)=\u0001 marks start, chr(2)=\u0002 marks end of highlighted term
|
||||
List<Object[]> rows = Collections.singletonList(new Object[]{docId, "\u0001Brief\u0002 an Anna", null, false, null, null, null});
|
||||
|
||||
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(docId));
|
||||
when(documentRepository.findAll(any(org.springframework.data.jpa.domain.Specification.class)))
|
||||
.thenReturn(List.of(doc));
|
||||
List<Object[]> ftsRows = new java.util.ArrayList<>();
|
||||
ftsRows.add(new Object[]{docId, 0.5d, 1L});
|
||||
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(ftsRows);
|
||||
when(documentRepository.findAllById(any())).thenReturn(List.of(doc));
|
||||
when(documentRepository.findEnrichmentData(any(), eq("Brief"))).thenReturn(rows);
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
@@ -1638,9 +1655,10 @@ class DocumentServiceTest {
|
||||
String snippetHeadline = "Hier ist der \u0001Brief\u0002 aus Berlin";
|
||||
List<Object[]> rows = Collections.singletonList(new Object[]{docId, "Dok", snippetHeadline, false, null, null, null});
|
||||
|
||||
when(documentRepository.findRankedIdsByFts("Brief")).thenReturn(List.of(docId));
|
||||
when(documentRepository.findAll(any(org.springframework.data.jpa.domain.Specification.class)))
|
||||
.thenReturn(List.of(doc));
|
||||
List<Object[]> snippetFtsRows = new java.util.ArrayList<>();
|
||||
snippetFtsRows.add(new Object[]{docId, 0.5d, 1L});
|
||||
when(documentRepository.findFtsPageRaw(anyString(), anyInt(), anyInt())).thenReturn(snippetFtsRows);
|
||||
when(documentRepository.findAllById(any())).thenReturn(List.of(doc));
|
||||
when(documentRepository.findEnrichmentData(any(), eq("Brief"))).thenReturn(rows);
|
||||
|
||||
DocumentSearchResult result = documentService.searchDocuments(
|
||||
@@ -2186,7 +2204,7 @@ class DocumentServiceTest {
|
||||
|
||||
@Test
|
||||
void findIdsForFilter_returnsEmpty_whenFtsHasNoMatches() {
|
||||
when(documentRepository.findRankedIdsByFts("xyz")).thenReturn(List.of());
|
||||
when(documentRepository.findAllMatchingIdsByFts("xyz")).thenReturn(List.of());
|
||||
|
||||
List<UUID> result = documentService.findIdsForFilter(
|
||||
"xyz", null, null, null, null, null, null, null, null);
|
||||
@@ -2321,4 +2339,61 @@ class DocumentServiceTest {
|
||||
assertThat(documentService.save(doc)).isEqualTo(doc);
|
||||
verify(documentRepository).save(doc);
|
||||
}
|
||||
|
||||
// ─── getDensity ────────────────────────────────────────────────────────────
|
||||
|
||||
private static DensityFilters anyFilters() {
|
||||
return new DensityFilters(null, null, null, null, null, null, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_returnsEmptyResult_whenNoDocumentsMatch() {
|
||||
when(documentRepository.findAll(any(Specification.class))).thenReturn(List.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(anyFilters());
|
||||
|
||||
assertThat(result.buckets()).isEmpty();
|
||||
assertThat(result.minDate()).isNull();
|
||||
assertThat(result.maxDate()).isNull();
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_groupsMatchingDocumentsByMonth() {
|
||||
Document a = Document.builder().documentDate(LocalDate.of(1915, 8, 3)).build();
|
||||
Document b = Document.builder().documentDate(LocalDate.of(1915, 8, 17)).build();
|
||||
Document c = Document.builder().documentDate(LocalDate.of(1915, 9, 1)).build();
|
||||
when(documentRepository.findAll(any(Specification.class))).thenReturn(List.of(a, b, c));
|
||||
when(tagService.expandTagNamesToDescendantIdSets(any())).thenReturn(List.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(anyFilters());
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::month)
|
||||
.containsExactly("1915-08", "1915-09");
|
||||
assertThat(result.buckets()).extracting(MonthBucket::count).containsExactly(2, 1);
|
||||
assertThat(result.minDate()).isEqualTo(LocalDate.of(1915, 8, 3));
|
||||
assertThat(result.maxDate()).isEqualTo(LocalDate.of(1915, 9, 1));
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_excludesDocumentsWithNullDate() {
|
||||
Document dated = Document.builder().documentDate(LocalDate.of(1915, 8, 3)).build();
|
||||
Document undated = Document.builder().documentDate(null).build();
|
||||
when(documentRepository.findAll(any(Specification.class))).thenReturn(List.of(dated, undated));
|
||||
when(tagService.expandTagNamesToDescendantIdSets(any())).thenReturn(List.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(anyFilters());
|
||||
|
||||
assertThat(result.buckets()).extracting(MonthBucket::count).containsExactly(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
void getDensity_shortCircuits_whenFtsReturnsNoMatches() {
|
||||
when(documentRepository.findAllMatchingIdsByFts("xyz")).thenReturn(List.of());
|
||||
|
||||
DocumentDensityResult result = documentService.getDensity(
|
||||
new DensityFilters("xyz", null, null, null, null, null, null));
|
||||
|
||||
assertThat(result.buckets()).isEmpty();
|
||||
verify(documentRepository, org.mockito.Mockito.never()).findAll(any(Specification.class));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,6 +44,14 @@ class CommentControllerTest {
|
||||
|
||||
// ─── Block comment endpoints ─────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
@WithMockUser
|
||||
void getBlockComments_returns400_when_documentId_is_not_a_UUID() throws Exception {
|
||||
UUID blockId = UUID.randomUUID();
|
||||
mockMvc.perform(get("/api/documents/NOT-A-UUID/transcription-blocks/" + blockId + "/comments"))
|
||||
.andExpect(status().isBadRequest());
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser
|
||||
void getBlockComments_returns200() throws Exception {
|
||||
@@ -115,6 +123,15 @@ class CommentControllerTest {
|
||||
|
||||
// ─── Block reply endpoints ───────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
@WithMockUser(authorities = "ANNOTATE_ALL")
|
||||
void replyToBlockComment_returns400_when_blockId_is_not_a_UUID() throws Exception {
|
||||
mockMvc.perform(post("/api/documents/" + DOC_ID + "/transcription-blocks/NOT-A-UUID"
|
||||
+ "/comments/" + COMMENT_ID + "/replies")
|
||||
.contentType(MediaType.APPLICATION_JSON).content(COMMENT_JSON))
|
||||
.andExpect(status().isBadRequest());
|
||||
}
|
||||
|
||||
@Test
|
||||
void replyToBlockComment_returns401_whenUnauthenticated() throws Exception {
|
||||
UUID blockId = UUID.randomUUID();
|
||||
|
||||
@@ -19,6 +19,7 @@ import org.raddatz.familienarchiv.notification.NotificationService;
|
||||
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
@@ -644,62 +645,99 @@ class CommentServiceTest {
|
||||
verify(auditService, never()).logAfterCommit(eq(AuditKind.MENTION_CREATED), any(), any(), any());
|
||||
}
|
||||
|
||||
// ─── findAnnotationIdsByIds ───────────────────────────────────────────────
|
||||
// ─── findDataByIds ────────────────────────────────────────────────────────
|
||||
|
||||
@Test
|
||||
void findAnnotationIdsByIds_returnsMap_forKnownIds() {
|
||||
UUID commentA = UUID.randomUUID();
|
||||
UUID annotationA = UUID.randomUUID();
|
||||
UUID commentB = UUID.randomUUID();
|
||||
UUID annotationB = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(commentA, commentB)))
|
||||
.thenReturn(List.of(
|
||||
DocumentComment.builder().id(commentA).annotationId(annotationA).build(),
|
||||
DocumentComment.builder().id(commentB).annotationId(annotationB).build()
|
||||
));
|
||||
|
||||
assertThat(commentService.findAnnotationIdsByIds(List.of(commentA, commentB)))
|
||||
.containsOnly(
|
||||
java.util.Map.entry(commentA, annotationA),
|
||||
java.util.Map.entry(commentB, annotationB)
|
||||
);
|
||||
}
|
||||
|
||||
@Test
|
||||
void findAnnotationIdsByIds_returnsEmptyMap_forEmptyInput() {
|
||||
assertThat(commentService.findAnnotationIdsByIds(List.of())).isEmpty();
|
||||
void findDataByIds_returns_empty_map_when_input_is_empty() {
|
||||
assertThat(commentService.findDataByIds(List.of())).isEmpty();
|
||||
verify(commentRepository, never()).findAllById(anyList());
|
||||
}
|
||||
|
||||
@Test
|
||||
void findAnnotationIdsByIds_omitsUnknownIds() {
|
||||
UUID known = UUID.randomUUID();
|
||||
UUID knownAnnotation = UUID.randomUUID();
|
||||
UUID missing = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(known, missing)))
|
||||
.thenReturn(List.of(
|
||||
DocumentComment.builder().id(known).annotationId(knownAnnotation).build()
|
||||
));
|
||||
void findDataByIds_strips_html_and_extracts_plain_text() {
|
||||
UUID id = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id)
|
||||
.content("<p><strong>Hello</strong> world</p>").build()));
|
||||
|
||||
assertThat(commentService.findAnnotationIdsByIds(List.of(known, missing)))
|
||||
.containsOnly(java.util.Map.entry(known, knownAnnotation))
|
||||
.doesNotContainKey(missing);
|
||||
Map<UUID, CommentData> result = commentService.findDataByIds(List.of(id));
|
||||
|
||||
assertThat(result.get(id).preview()).isEqualTo("Hello world");
|
||||
}
|
||||
|
||||
@Test
|
||||
void findAnnotationIdsByIds_omitsCommentsWithNullAnnotationId() {
|
||||
UUID legacy = UUID.randomUUID();
|
||||
UUID block = UUID.randomUUID();
|
||||
UUID annotation = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(legacy, block)))
|
||||
.thenReturn(List.of(
|
||||
DocumentComment.builder().id(legacy).annotationId(null).build(),
|
||||
DocumentComment.builder().id(block).annotationId(annotation).build()
|
||||
));
|
||||
void findDataByIds_truncates_at_exactly_120_chars() {
|
||||
UUID id = UUID.randomUUID();
|
||||
String text121 = "a".repeat(121);
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id).content(text121).build()));
|
||||
|
||||
assertThat(commentService.findAnnotationIdsByIds(List.of(legacy, block)))
|
||||
.containsOnly(java.util.Map.entry(block, annotation))
|
||||
.doesNotContainKey(legacy);
|
||||
assertThat(commentService.findDataByIds(List.of(id)).get(id).preview()).hasSize(120);
|
||||
}
|
||||
|
||||
@Test
|
||||
void findDataByIds_preserves_content_at_exactly_120_chars() {
|
||||
UUID id = UUID.randomUUID();
|
||||
String text120 = "a".repeat(120);
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id).content(text120).build()));
|
||||
|
||||
assertThat(commentService.findDataByIds(List.of(id)).get(id).preview()).hasSize(120);
|
||||
}
|
||||
|
||||
@Test
|
||||
void findDataByIds_returns_empty_string_for_blank_content() {
|
||||
UUID id = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id).content(" ").build()));
|
||||
|
||||
assertThat(commentService.findDataByIds(List.of(id)).get(id).preview()).isEmpty();
|
||||
}
|
||||
|
||||
@Test
|
||||
void findDataByIds_returns_empty_string_for_null_content() {
|
||||
UUID id = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id).content(null).build()));
|
||||
|
||||
assertThat(commentService.findDataByIds(List.of(id)).get(id).preview()).isEmpty();
|
||||
}
|
||||
|
||||
@Test
|
||||
void findDataByIds_omits_deleted_comments_from_result_map() {
|
||||
UUID present = UUID.randomUUID();
|
||||
UUID deleted = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(present, deleted)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(present).content("Hi").build()));
|
||||
|
||||
Map<UUID, CommentData> result = commentService.findDataByIds(List.of(present, deleted));
|
||||
|
||||
assertThat(result).containsKey(present);
|
||||
assertThat(result).doesNotContainKey(deleted);
|
||||
}
|
||||
|
||||
@Test
|
||||
void findDataByIds_preserves_annotationId_alongside_preview() {
|
||||
UUID id = UUID.randomUUID();
|
||||
UUID annotationId = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id)
|
||||
.annotationId(annotationId).content("Text").build()));
|
||||
|
||||
CommentData data = commentService.findDataByIds(List.of(id)).get(id);
|
||||
|
||||
assertThat(data.annotationId()).isEqualTo(annotationId);
|
||||
assertThat(data.preview()).isEqualTo("Text");
|
||||
}
|
||||
|
||||
@Test
|
||||
void findDataByIds_sets_null_annotationId_when_comment_has_no_annotation() {
|
||||
UUID id = UUID.randomUUID();
|
||||
when(commentRepository.findAllById(List.of(id)))
|
||||
.thenReturn(List.of(DocumentComment.builder().id(id)
|
||||
.annotationId(null).content("Text").build()));
|
||||
|
||||
assertThat(commentService.findDataByIds(List.of(id)).get(id).annotationId()).isNull();
|
||||
}
|
||||
|
||||
private void stubBlock(UUID docId, UUID blockId) {
|
||||
|
||||
@@ -159,6 +159,26 @@ class GeschichteServiceIntegrationTest {
|
||||
.isEmpty();
|
||||
}
|
||||
|
||||
@Test
|
||||
void list_DRAFT_does_not_return_other_users_drafts() {
|
||||
// writer creates a draft; writer2 (also BLOG_WRITE) should not see it
|
||||
AppUser writer2 = appUserRepository.save(AppUser.builder()
|
||||
.email("writer2-int@test")
|
||||
.password("hash")
|
||||
.build());
|
||||
|
||||
authenticateAs(writer, Permission.BLOG_WRITE);
|
||||
GeschichteUpdateDTO dto = new GeschichteUpdateDTO();
|
||||
dto.setTitle("Writer 1 draft");
|
||||
dto.setBody("<p>private</p>");
|
||||
geschichteService.create(dto);
|
||||
|
||||
authenticateAs(writer2, Permission.BLOG_WRITE);
|
||||
List<Geschichte> result = geschichteService.list(GeschichteStatus.DRAFT, List.of(), null, 50);
|
||||
|
||||
assertThat(result).isEmpty();
|
||||
}
|
||||
|
||||
private UUID publishedStoryWithPersons(String title, List<UUID> personIds) {
|
||||
GeschichteUpdateDTO dto = new GeschichteUpdateDTO();
|
||||
dto.setTitle(title);
|
||||
|
||||
@@ -81,6 +81,29 @@ class PersonControllerTest {
|
||||
.andExpect(jsonPath("$[0].firstName").value("Hans"));
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser(authorities = "READ_ALL")
|
||||
void getPersons_delegatesTopByDocumentCount_whenSortAndSizeGiven() throws Exception {
|
||||
PersonSummaryDTO top = mockPersonSummary("Käthe", "Raddatz");
|
||||
when(personService.findTopByDocumentCount(4)).thenReturn(List.of(top));
|
||||
|
||||
mockMvc.perform(get("/api/persons").param("sort", "documentCount").param("size", "4"))
|
||||
.andExpect(status().isOk())
|
||||
.andExpect(jsonPath("$[0].firstName").value("Käthe"));
|
||||
}
|
||||
|
||||
@Test
|
||||
@WithMockUser(authorities = "READ_ALL")
|
||||
void getPersons_capsTopByDocumentCount_atFifty() throws Exception {
|
||||
ArgumentCaptor<Integer> sizeCaptor = ArgumentCaptor.forClass(Integer.class);
|
||||
when(personService.findTopByDocumentCount(sizeCaptor.capture())).thenReturn(Collections.emptyList());
|
||||
|
||||
mockMvc.perform(get("/api/persons").param("sort", "documentCount").param("size", "999"))
|
||||
.andExpect(status().isOk());
|
||||
|
||||
assertThat(sizeCaptor.getValue()).isEqualTo(50);
|
||||
}
|
||||
|
||||
private PersonSummaryDTO mockPersonSummary(String firstName, String lastName) {
|
||||
return new PersonSummaryDTO() {
|
||||
public java.util.UUID getId() { return UUID.randomUUID(); }
|
||||
|
||||
@@ -35,4 +35,15 @@ class AppUserTest {
|
||||
.count();
|
||||
assertThat(distinct).isGreaterThan(1);
|
||||
}
|
||||
|
||||
@Test
|
||||
void computeColor_returnsValidPaletteColorForIntegerMinValueHash() {
|
||||
// UUID "80000000-0000-0000-0000-000000000000" has hashCode() == Integer.MIN_VALUE.
|
||||
// Math.abs(Integer.MIN_VALUE) overflows back to Integer.MIN_VALUE (negative), making
|
||||
// Math.abs(hashCode()) % n unsafe for palette sizes that don't evenly divide MIN_VALUE.
|
||||
// Math.floorMod eliminates this edge case entirely.
|
||||
UUID minHashId = UUID.fromString("80000000-0000-0000-0000-000000000000");
|
||||
assertThat(minHashId.hashCode()).isEqualTo(Integer.MIN_VALUE);
|
||||
assertThat(EXPECTED_PALETTE).contains(AppUser.computeColor(minHashId));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -902,4 +902,18 @@ class UserServiceTest {
|
||||
assertThat(result.getName()).isEqualTo("Familie");
|
||||
assertThat(result.getPermissions()).containsExactlyInAnyOrder("READ_ALL", "WRITE_ALL");
|
||||
}
|
||||
|
||||
@Test
|
||||
void createGroup_withNullPermissions_savesGroupWithEmptyPermissionSet() {
|
||||
org.raddatz.familienarchiv.user.GroupDTO dto = new org.raddatz.familienarchiv.user.GroupDTO();
|
||||
dto.setName("Leser");
|
||||
dto.setPermissions(null);
|
||||
|
||||
UserGroup saved = UserGroup.builder().id(UUID.randomUUID()).name("Leser").build();
|
||||
when(groupRepository.save(any())).thenReturn(saved);
|
||||
|
||||
userService.createGroup(dto);
|
||||
|
||||
verify(groupRepository).save(argThat(g -> g.getPermissions() != null && g.getPermissions().isEmpty()));
|
||||
}
|
||||
}
|
||||
|
||||
231
docker-compose.prod.yml
Normal file
231
docker-compose.prod.yml
Normal file
@@ -0,0 +1,231 @@
|
||||
# Production / staging Docker Compose for Familienarchiv.
|
||||
#
|
||||
# This is a self-contained file (not an overlay over docker-compose.yml).
|
||||
# All services for the prod stack live here. Environment isolation is
|
||||
# achieved via the docker compose project name:
|
||||
#
|
||||
# production: docker compose -f docker-compose.prod.yml -p archiv-production ...
|
||||
# staging: docker compose -f docker-compose.prod.yml -p archiv-staging --profile staging ...
|
||||
#
|
||||
# Volumes, networks and containers are namespaced by the project name,
|
||||
# so the two environments cohabit cleanly on the same host.
|
||||
#
|
||||
# Required env vars (provided by .env.production / .env.staging in CI):
|
||||
# TAG image tag (release tag or "nightly")
|
||||
# PORT_BACKEND, PORT_FRONTEND host-side ports (bound to 127.0.0.1 only)
|
||||
# APP_DOMAIN e.g. archiv.raddatz.cloud / staging.raddatz.cloud
|
||||
# POSTGRES_PASSWORD Postgres password
|
||||
# MINIO_PASSWORD MinIO root password (admin operations only)
|
||||
# MINIO_APP_PASSWORD MinIO application service-account password
|
||||
# (least-privilege scope: archive bucket only)
|
||||
# OCR_TRAINING_TOKEN token guarding ocr-service /train endpoint
|
||||
# APP_ADMIN_USERNAME seeded admin email (e.g. admin@archiv.raddatz.cloud)
|
||||
# APP_ADMIN_PASSWORD seeded admin password — CRITICAL: locked in on
|
||||
# first deploy because UserDataInitializer only
|
||||
# creates the account if the email does not exist
|
||||
# MAIL_HOST, MAIL_PORT, SMTP relay (production only; staging uses mailpit)
|
||||
# MAIL_USERNAME, MAIL_PASSWORD
|
||||
# APP_MAIL_FROM sender address (e.g. noreply@raddatz.cloud)
|
||||
|
||||
networks:
|
||||
archiv-net:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
postgres-data:
|
||||
minio-data:
|
||||
ocr-models:
|
||||
ocr-cache:
|
||||
|
||||
services:
|
||||
db:
|
||||
image: postgres:16-alpine
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_USER: archiv
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
POSTGRES_DB: archiv
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U archiv -d archiv"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
minio:
|
||||
# Pinned MinIO release for reproducible deploys. Bumped manually until
|
||||
# Renovate is bootstrapped for these production images (see follow-up issue).
|
||||
image: minio/minio:RELEASE.2025-02-28T09-55-16Z
|
||||
restart: unless-stopped
|
||||
command: server /data --console-address ":9001"
|
||||
environment:
|
||||
MINIO_ROOT_USER: archiv
|
||||
MINIO_ROOT_PASSWORD: ${MINIO_PASSWORD}
|
||||
volumes:
|
||||
- minio-data:/data
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
|
||||
# Idempotent bucket bootstrap + service-account creation.
|
||||
# Runs once per `docker compose up` and exits 0. The entrypoint is
|
||||
# extracted to infra/minio/bootstrap.sh so the (non-trivial) idempotent
|
||||
# logic is readable, reviewable, and unit-testable as a script rather
|
||||
# than YAML-escaped shell.
|
||||
create-buckets:
|
||||
# Custom image bakes bootstrap.sh in at build time. A bind-mount fails on
|
||||
# the Docker-out-of-Docker production runner because the host daemon
|
||||
# resolves the relative path against the host filesystem, not the
|
||||
# runner container's CWD. See #506 + infra/minio/Dockerfile.
|
||||
build:
|
||||
context: ./infra/minio
|
||||
# Declare one-shot intent so `docker compose up -d --wait` treats
|
||||
# exited(0) as success rather than "not running, fail". Pair with
|
||||
# backend's `service_completed_successfully` dependency below. See #510.
|
||||
restart: "no"
|
||||
depends_on:
|
||||
minio:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- archiv-net
|
||||
environment:
|
||||
MINIO_PASSWORD: ${MINIO_PASSWORD}
|
||||
MINIO_APP_PASSWORD: ${MINIO_APP_PASSWORD}
|
||||
|
||||
# Dev-only mail catcher; gated behind the staging profile so production
|
||||
# never starts it. Staging workflow runs with `--profile staging`.
|
||||
mailpit:
|
||||
# Pinned for reproducibility; bumped manually until Renovate is bootstrapped.
|
||||
image: axllent/mailpit:v1.29.7
|
||||
restart: unless-stopped
|
||||
profiles: ["staging"]
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
# TCP-port open check via BusyBox `nc`. The previous wget-based probe
|
||||
# introduced a non-obvious binary dependency on the mailpit image; a
|
||||
# future tag that ships without wget would silently disable the
|
||||
# healthcheck. `nc` is part of BusyBox in the upstream image.
|
||||
test: ["CMD-SHELL", "nc -z localhost 8025 || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
ocr-service:
|
||||
build:
|
||||
context: ./ocr-service
|
||||
restart: unless-stopped
|
||||
expose:
|
||||
- "8000"
|
||||
# Surya OCR loads ~5GB of transformer models at startup; first request
|
||||
# triggers a further ~1GB Kraken model download into ocr-cache.
|
||||
# CX42+ (16 GB RAM) honours the default. On a CX32 (8 GB) override with
|
||||
# OCR_MEM_LIMIT=6g (slower first-request, fits the host).
|
||||
mem_limit: ${OCR_MEM_LIMIT:-12g}
|
||||
memswap_limit: ${OCR_MEM_LIMIT:-12g}
|
||||
volumes:
|
||||
- ocr-models:/app/models
|
||||
- ocr-cache:/root/.cache
|
||||
environment:
|
||||
KRAKEN_MODEL_PATH: /app/models/german_kurrent.mlmodel
|
||||
TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
|
||||
OCR_CONFIDENCE_THRESHOLD: "0.3"
|
||||
OCR_CONFIDENCE_THRESHOLD_KURRENT: "0.5"
|
||||
# SSRF allowlist pinned explicitly to the internal MinIO hostname.
|
||||
# In prod the OCR service only fetches PDFs from MinIO over the
|
||||
# docker network; localhost/127.0.0.1 are dev-only sources and
|
||||
# must NOT be reachable here. Do not widen to `*`.
|
||||
ALLOWED_PDF_HOSTS: "minio"
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 12
|
||||
start_period: 120s
|
||||
|
||||
backend:
|
||||
image: familienarchiv/backend:${TAG:-nightly}
|
||||
build:
|
||||
context: ./backend
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
minio:
|
||||
condition: service_healthy
|
||||
ocr-service:
|
||||
condition: service_healthy
|
||||
# Gate startup on the bucket bootstrap. Without this, backend
|
||||
# starts in parallel with create-buckets and may race the policy
|
||||
# bind. Also tells compose's `up -d --wait` that create-buckets
|
||||
# is a one-shot that must complete successfully. See #510.
|
||||
create-buckets:
|
||||
condition: service_completed_successfully
|
||||
# Bound to localhost only — Caddy fronts external traffic.
|
||||
ports:
|
||||
- "127.0.0.1:${PORT_BACKEND}:8080"
|
||||
environment:
|
||||
SPRING_DATASOURCE_URL: jdbc:postgresql://db:5432/archiv
|
||||
SPRING_DATASOURCE_USERNAME: archiv
|
||||
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
# Application uses the bucket-scoped service account, not MinIO root.
|
||||
S3_ENDPOINT: http://minio:9000
|
||||
S3_ACCESS_KEY: archiv-app
|
||||
S3_SECRET_KEY: ${MINIO_APP_PASSWORD}
|
||||
S3_BUCKET_NAME: familienarchiv
|
||||
S3_REGION: us-east-1
|
||||
# No SPRING_PROFILES_ACTIVE — base application.yaml is production-ready
|
||||
# (Swagger disabled, show-sql off, open-in-view false).
|
||||
APP_BASE_URL: https://${APP_DOMAIN}
|
||||
APP_ADMIN_USERNAME: ${APP_ADMIN_USERNAME}
|
||||
APP_ADMIN_PASSWORD: ${APP_ADMIN_PASSWORD}
|
||||
APP_OCR_BASE_URL: http://ocr-service:8000
|
||||
APP_OCR_TRAINING_TOKEN: ${OCR_TRAINING_TOKEN}
|
||||
MAIL_HOST: ${MAIL_HOST}
|
||||
MAIL_PORT: ${MAIL_PORT:-587}
|
||||
MAIL_USERNAME: ${MAIL_USERNAME:-}
|
||||
MAIL_PASSWORD: ${MAIL_PASSWORD:-}
|
||||
APP_MAIL_FROM: ${APP_MAIL_FROM:-noreply@raddatz.cloud}
|
||||
SPRING_MAIL_PROPERTIES_MAIL_SMTP_AUTH: ${MAIL_SMTP_AUTH:-true}
|
||||
SPRING_MAIL_PROPERTIES_MAIL_SMTP_STARTTLS_ENABLE: ${MAIL_STARTTLS_ENABLE:-true}
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 30s
|
||||
|
||||
frontend:
|
||||
image: familienarchiv/frontend:${TAG:-nightly}
|
||||
build:
|
||||
context: ./frontend
|
||||
target: production
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "127.0.0.1:${PORT_FRONTEND}:3000"
|
||||
environment:
|
||||
# SSR fetches go inside the docker network; clients hit https://${APP_DOMAIN}
|
||||
API_INTERNAL_URL: http://backend:8080
|
||||
ORIGIN: https://${APP_DOMAIN}
|
||||
networks:
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:3000/login >/dev/null 2>&1 || exit 1"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 20s
|
||||
@@ -13,7 +13,7 @@ services:
|
||||
ports:
|
||||
- "${PORT_DB}:5432"
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
|
||||
interval: 5s
|
||||
@@ -35,7 +35,7 @@ services:
|
||||
- "${PORT_MINIO_API}:9000" # API Port
|
||||
- "${PORT_MINIO_CONSOLE}:9001" # Web-Oberfläche
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
|
||||
interval: 30s
|
||||
@@ -56,7 +56,7 @@ services:
|
||||
exit 0;
|
||||
"
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
|
||||
# --- Mail catcher: Mailpit (dev only) ---
|
||||
# Catches all outgoing emails and displays them in a web UI.
|
||||
@@ -69,7 +69,7 @@ services:
|
||||
- "${PORT_MAILPIT_UI:-8025}:8025" # Web UI
|
||||
- "${PORT_MAILPIT_SMTP:-1025}:1025" # SMTP
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
|
||||
# --- OCR: Python microservice (Surya + Kraken) ---
|
||||
# Single-node only: OCR training reloads the model in-process after each run.
|
||||
@@ -99,7 +99,7 @@ services:
|
||||
OCR_CLAHE_TILE_SIZE: "8" # CLAHE tile grid size (NxN tiles per page)
|
||||
OCR_MAX_CACHED_MODELS: "2" # LRU cache; each model ~500 MB, so 2 = ~1 GB resident
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
|
||||
interval: 10s
|
||||
@@ -150,7 +150,7 @@ services:
|
||||
ports:
|
||||
- "${PORT_BACKEND}:8080"
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO- http://localhost:8080/actuator/health | grep -q UP || exit 1"]
|
||||
interval: 15s
|
||||
@@ -163,6 +163,7 @@ services:
|
||||
build:
|
||||
context: ./frontend
|
||||
dockerfile: Dockerfile
|
||||
target: development # Dockerfile is multi-stage; default would be the production stage
|
||||
container_name: archive-frontend
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
@@ -184,10 +185,10 @@ services:
|
||||
ports:
|
||||
- "${PORT_FRONTEND}:5173"
|
||||
networks:
|
||||
- archive-net
|
||||
- archiv-net
|
||||
|
||||
networks:
|
||||
archive-net:
|
||||
archiv-net:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
|
||||
146
docs/ARCHITECTURE.md
Normal file
146
docs/ARCHITECTURE.md
Normal file
@@ -0,0 +1,146 @@
|
||||
<!-- Last reviewed: 2026-05-05 -->
|
||||
|
||||
# Familienarchiv — Architecture
|
||||
|
||||
**Target reader:** a PM-with-CS background who has read the README.
|
||||
**Goal:** accurate mental model after one read — enough to sketch the system on a whiteboard.
|
||||
|
||||
For domain terminology, see [docs/GLOSSARY.md](GLOSSARY.md).
|
||||
For security policies and hardening, see [docs/security-guide.md](security-guide.md).
|
||||
For low-level ADR details, see [docs/adr/](adr/).
|
||||
|
||||
---
|
||||
|
||||
## 1. High-level diagram
|
||||
|
||||
The updated container diagram below shows all six deployable units and their communication paths.
|
||||
|
||||
See [docs/architecture/c4-diagrams.md](architecture/c4-diagrams.md) for the full C4 L1/L2/L3 diagrams (Mermaid, Gitea-rendered).
|
||||
|
||||
Key points not visible in the diagram:
|
||||
|
||||
- **OCR network boundary:** the OCR service has no external port — it is reachable only on the internal Docker Compose network. Only the backend calls it. The OCR service fetches PDF files from MinIO using a presigned URL that the backend generates and passes in the request body; the PDF bytes never pass through the backend.
|
||||
- **SSE path:** server-sent event notifications go directly from the backend to the user's browser (not via the SvelteKit SSR layer) over a long-lived HTTP connection managed by `SseEmitterRegistry`.
|
||||
|
||||
---
|
||||
|
||||
## 2. Domain set
|
||||
|
||||
Both stacks are organised **package-by-domain**: each domain owns its entities, service, controller, repository, and DTOs. Domain names are identical across `backend/src/main/java/.../` and `frontend/src/lib/`.
|
||||
|
||||
### Tier-1 domains — have entities and user-facing CRUD
|
||||
|
||||
**`document`** — the archive's core concept. Owns `Document`, `DocumentVersion`, `TranscriptionBlock`, `DocumentAnnotation`, `DocumentComment`. Does NOT own persons or tags (references them by ID). Cross-domain deps: `person` (sender/receivers), `tag` (labels), `ocr` (HTR pipeline), `notification` (comment mentions), `audit` (every mutation).
|
||||
|
||||
**`person`** — historical individuals referenced by documents. Owns `Person`, `PersonNameAlias`, `PersonRelationship`. Does NOT own `AppUser` (login accounts are a separate domain). Cross-domain deps: `document` (relationship queries).
|
||||
|
||||
**`tag`** — hierarchical document categories. Owns `Tag` (self-referencing `parent_id` tree). Does NOT own documents; the join is document-side. No cross-domain deps.
|
||||
|
||||
**`user`** — login accounts and permission groups. Owns `AppUser`, `UserGroup`, invite tokens. Does NOT own `Person` records. Cross-domain deps: `audit` (user management events).
|
||||
|
||||
**`geschichte`** — family stories. Owns `Geschichte` (`DRAFT → PUBLISHED` lifecycle). Cross-domain deps: `person`, `document` (linked entities in the story body).
|
||||
|
||||
**`notification`** — in-app messages. Owns `Notification`. Delivers via `SseEmitterRegistry` (live) and persisted rows (bell dropdown). Cross-domain deps: `user` (recipient), `document` (context).
|
||||
|
||||
**`ocr`** — OCR/HTR pipeline orchestration. Owns `OcrJob`, `OcrJobDocument`, `SenderModel`. Calls the Python OCR service; maps streamed transcription blocks back to `document`. Cross-domain deps: `document` (target), `filestorage` (presigned URLs).
|
||||
|
||||
### Tier-2 domains — derived (UI without dedicated tables)
|
||||
|
||||
A **derived domain** has its own routes and UI but no database tables of its own; it is assembled from data owned by Tier-1 domains.
|
||||
|
||||
**`conversation`** (route: `/briefwechsel`) — bilateral letter timeline between two `Person`s. Derived from `Document` sender/receiver relationships. The `DocumentRepository` bidirectional query is the only data source.
|
||||
|
||||
**`activity`** (route: `/aktivitaeten`) — family activity feed. Derived from `audit_log`, `notifications`, and document events. No aggregation table; computed on-the-fly by `DashboardService` and composed in the SvelteKit load function.
|
||||
|
||||
---
|
||||
|
||||
## 3. Cross-cutting layer
|
||||
|
||||
Members of the cross-cutting layer have no entity of their own, no user-facing CRUD, and are consumed by two or more domains — or are framework infrastructure that every domain depends on.
|
||||
|
||||
| Member (backend package) | Purpose | Admission criteria |
|
||||
|---|---|---|
|
||||
| `audit` | Append-only event store (`audit_log`) for all domain mutations. Feeds the activity feed and Family Pulse dashboard. | Consumed by 5+ domains; no user-facing CRUD of its own |
|
||||
| `config` | Infrastructure bean definitions: `MinioConfig`, `AsyncConfig`, `WebConfig` | Framework infra; no business logic |
|
||||
| `dashboard` | Stats aggregation for the admin dashboard and Family Pulse widget | Aggregates from 3+ domains; no owned entities |
|
||||
| `exception` | `DomainException`, `ErrorCode` enum, `GlobalExceptionHandler` | Framework infra; consumed by every controller and service |
|
||||
| `filestorage` | `FileService` — MinIO/S3 upload, download, presigned-URL generation | Generic service; consumed by `document` and `ocr` |
|
||||
| `importing` | `MassImportService` — async ODS/Excel batch import | Orchestrates across `person`, `tag`, `document` |
|
||||
| `security` | `SecurityConfig`, `Permission` enum, `@RequirePermission` annotation, `PermissionAspect` (AOP) | Framework infra; enforced globally across all controllers |
|
||||
|
||||
**Frontend `shared/`** follows the same admission criteria. Key members: `api.server.ts` (typed openapi-fetch client factory), `errors.ts` (backend `ErrorCode` → i18n mapping), `shared/primitives/` (generic UI components used across ≥2 domains), `shared/discussion/` (comment/mention editor used by `document` and `geschichte`), `shared/utils/` (pure date/sort/debounce utilities).
|
||||
|
||||
---
|
||||
|
||||
## 4. Stack-symmetry principle
|
||||
|
||||
**Rule:** a domain has the same name on both stacks.
|
||||
|
||||
| Backend | Frontend |
|
||||
|---|---|
|
||||
| `backend/src/main/java/.../document/` | `frontend/src/lib/document/` |
|
||||
| `backend/src/main/java/.../person/` | `frontend/src/lib/person/` |
|
||||
| … | … |
|
||||
|
||||
Adding a new Tier-1 domain means creating a package on **both** sides under the same name. Adding only a backend package without a corresponding frontend folder (or vice versa) is a red flag in code review.
|
||||
|
||||
The backend has been domain-first since the project started. The frontend `src/lib/` was restructured from flat-by-type to domain-first in issue #408 (May 2026).
|
||||
|
||||
---
|
||||
|
||||
## 5. Key architectural decisions
|
||||
|
||||
### ADR-001 — OCR as a Python microservice
|
||||
The two OCR engines required (Surya for typewritten text, Kraken for Kurrent/Sütterlin HTR) exist only in the Python ecosystem. A separate `ocr-service` Python container exposes a simple HTTP API; the Spring Boot backend calls it via `RestClient`. All job tracking and business logic remain in Spring Boot. See [ADR-001](adr/001-ocr-python-microservice.md).
|
||||
|
||||
### ADR-002 — Polygon JSONB storage for annotations
|
||||
Kraken outputs polygon boundaries for historical handwriting; axis-aligned bounding boxes approximate them poorly. Annotation and transcription-block positions are stored as `polygon JSONB` columns. Display-only — server-side geometry continues to use the AABB fields. See [ADR-002](adr/002-polygon-jsonb-storage.md).
|
||||
|
||||
### ADR-003 — Unified activity feed (Chronik/Aktivität)
|
||||
Personal notifications and ambient activity (uploads, transcriptions, comments) are merged into one `/aktivitaeten` page. The SvelteKit load function composes data from `/api/dashboard/activity` and `/api/notifications` — no new backend orchestrator endpoint. See [ADR-003](adr/003-chronik-unified-activity-feed.md).
|
||||
|
||||
### ADR-004 — In-process PDFBox thumbnails
|
||||
Thumbnails are rendered in Spring Boot using Apache PDFBox (already a dependency) rather than delegating to the OCR service. A dedicated `thumbnailExecutor` pool isolates the work. See [ADR-004](adr/004-pdfbox-thumbnails.md).
|
||||
|
||||
### ADR-005 — thumbnailAspect + pageCount
|
||||
Aspect ratio (`PORTRAIT` / `LANDSCAPE`) and page count are persisted alongside the thumbnail JPEG at generation time — cheap to derive then, expensive to re-derive later. See [ADR-005](adr/005-thumbnail-aspect-and-page-count.md).
|
||||
|
||||
### ADR-006 — Synchronous domain events inside the publisher's transaction
|
||||
When a `Person` display name changes, all `TranscriptionBlock` `@mention` text must be rewritten atomically. This is done via Spring `ApplicationEventPublisher` + `@EventListener @Transactional` to avoid a circular dependency between `PersonService` and `TranscriptionBlockService`. See [ADR-006](adr/006-synchronous-domain-events-in-transaction.md).
|
||||
|
||||
### Layering rule
|
||||
```
|
||||
Controller → Service → Repository → DB
|
||||
```
|
||||
Controllers never call repositories directly. Services never reach into another domain's repository — they call the other domain's service. This keeps domain boundaries clear and business logic testable without a running database.
|
||||
|
||||
### Permission system
|
||||
Permissions are enforced via `@RequirePermission(Permission.X)` on controller methods, checked at runtime by `PermissionAspect` (Spring AOP). The `Permission` enum defines the available capabilities (`READ_ALL`, `WRITE_ALL`, `ADMIN`, `ADMIN_USER`, `ADMIN_TAG`, `ADMIN_PERMISSION`, `ANNOTATE_ALL`, `BLOG_WRITE`). This is not Spring Security's `@PreAuthorize` — do not mix the two mechanisms.
|
||||
|
||||
Sessions use a Base64-encoded Basic Auth token stored in an `httpOnly`, `SameSite=strict` cookie (`auth_token`, maxAge=86400 s). CSRF protection is disabled because this cookie configuration structurally prevents cross-origin credential theft. See [docs/security-guide.md](security-guide.md) for the full security reference.
|
||||
|
||||
---
|
||||
|
||||
## 6. Data flow walkthroughs
|
||||
|
||||
### Document upload
|
||||
|
||||
1. User submits the edit form (file + metadata) from the browser.
|
||||
2. The SvelteKit server action sends `PUT /api/documents/{id}` as `multipart/form-data`. `hooks.server.ts` (`handleFetch`) transparently injects the `Authorization` header from the `auth_token` cookie — the action itself is unaware of auth.
|
||||
3. `PermissionAspect` intercepts the controller method, verifies the user has `WRITE_ALL`, and proceeds.
|
||||
4. `DocumentController` delegates to `DocumentService.updateDocument()`.
|
||||
5. `DocumentService` resolves the `Person` sender by ID (via `PersonService`), resolves or creates `Tag`s (via `TagService`), then calls `FileService.uploadFile()`.
|
||||
6. `FileService` generates a key (`documents/{UUID}_{originalFilename}`), streams the file to MinIO via the AWS SDK v2 S3Client.
|
||||
7. `DocumentService` persists the S3 key, sets `status = UPLOADED`, and saves to PostgreSQL.
|
||||
8. `AuditService` writes an `UPLOADED` event to `audit_log` in the same transaction.
|
||||
9. Backend returns the updated `Document` JSON; SvelteKit refreshes the document detail page.
|
||||
|
||||
### Transcription block autosave
|
||||
|
||||
1. The transcriber pauses typing; the frontend's `useBlockAutoSave` factory fires after a debounce interval.
|
||||
2. The browser sends `PUT /api/documents/{documentId}/transcription-blocks/{blockId}` with the new text and the block's current `version` (optimistic lock). `hooks.server.ts` (`handleFetch`) injects the `Authorization` header from the cookie.
|
||||
3. `TranscriptionService.saveBlock()` loads the block, checks the `@Version` field for concurrent edits, updates `block.text` and any `@mention` sidecars, and calls `saveAndFlush`.
|
||||
4. If a concurrent save collides (version mismatch), the backend returns `409 Conflict`; the frontend's `saveBlockWithConflictRetry` helper re-fetches and retries.
|
||||
5. On success, `AuditService` logs a `BLOCK_SAVED` event.
|
||||
6. If the block text contains a new `@PersonName` mention, `NotificationService` creates a `Notification` row for the mentioned person's `AppUser`.
|
||||
7. `SseEmitterRegistry` broadcasts the notification over the open SSE connection to that user's browser in real time.
|
||||
@@ -1,97 +1,5 @@
|
||||
# Docs — Familienarchiv
|
||||
# docs/
|
||||
|
||||
## Overview
|
||||
→ See [docs/README.md](./README.md) for the folder structure and documentation guide.
|
||||
|
||||
Project documentation organized into four categories: architecture decision records (ADRs), system architecture diagrams, infrastructure runbooks, and detailed UI/UX specifications.
|
||||
|
||||
## Folder Structure
|
||||
|
||||
```
|
||||
docs/
|
||||
├── adr/ # Architecture Decision Records
|
||||
├── architecture/ # C4 model diagrams and system architecture docs
|
||||
├── infrastructure/ # Deployment, CI/CD, and ops guides
|
||||
├── specs/ # UI/UX feature specifications (HTML)
|
||||
├── app-analysis-*.md # Application analysis reports
|
||||
├── mail.md # Mail system documentation
|
||||
├── security-guide.md # Security policies and hardening guide
|
||||
├── STYLEGUIDE.md # Coding and design style guide
|
||||
├── TODO-backend.md # Backend backlog
|
||||
└── TODO-frontend.md # Frontend backlog
|
||||
```
|
||||
|
||||
## ADR (`adr/`)
|
||||
|
||||
Architecture Decision Records capture major technical decisions and their rationale.
|
||||
|
||||
| ADR | Title | Status |
|
||||
|---|---|---|
|
||||
| `001-ocr-python-microservice.md` | OCR as a separate Python container | Accepted |
|
||||
| `002-polygon-jsonb-storage.md` | Polygon coordinates in JSONB columns | Accepted |
|
||||
| `003-chronik-unified-activity-feed.md` | Unified activity feed (Chronik) | Accepted |
|
||||
|
||||
When making a significant architectural change (new service, data model change, technology swap), write a new ADR following the format:
|
||||
- Status (Proposed / Accepted / Deprecated / Superseded)
|
||||
- Context (forces at play)
|
||||
- Decision (what we decided)
|
||||
- Consequences (trade-offs)
|
||||
- Alternatives Considered (table format)
|
||||
|
||||
## Architecture (`architecture/`)
|
||||
|
||||
Contains C4 model diagrams describing the system at different zoom levels:
|
||||
|
||||
- **Context diagram** — How Familienarchiv fits into the user and system ecosystem
|
||||
- **Container diagram** — The high-level technology choices (Spring Boot, SvelteKit, PostgreSQL, MinIO, OCR service)
|
||||
- **Component diagram** — Major structural components within the backend
|
||||
|
||||
Written in Markdown with embedded Mermaid or PlantUML diagrams (`c4-diagrams.md`).
|
||||
|
||||
## Infrastructure (`infrastructure/`)
|
||||
|
||||
Operational documentation for running Familienarchiv in production and CI.
|
||||
|
||||
| Document | Purpose |
|
||||
|---|---|
|
||||
| `ci-gitea.md` | Gitea CI/CD pipeline configuration |
|
||||
| `production-compose.md` | Production Docker Compose setup |
|
||||
| `s3-migration.md` | Migrating documents between S3 buckets |
|
||||
| `self-hosted-catalogue.md` | Self-hosted software catalogue |
|
||||
|
||||
## Specs (`specs/`)
|
||||
|
||||
High-fidelity UI/UX specifications written as standalone HTML files. These are design documents that describe exact layout, interactions, and responsive behavior before implementation.
|
||||
|
||||
Each spec typically includes:
|
||||
- Visual mockups with CSS-in-HTML styling
|
||||
- Interaction flows and state transitions
|
||||
- Responsive breakpoint behavior
|
||||
- Accessibility requirements
|
||||
|
||||
Examples of active spec areas:
|
||||
- Document detail page (`document-topbar-*.html`, `documents-page-spec.html`)
|
||||
- Admin interfaces (`admin-redesign-*.html`, `admin-tag-overhaul.html`)
|
||||
- Transcription workflows (`inline-transcription-*.html`, `annotation-transcription-*.html`)
|
||||
- Dashboard and activity feeds (`dashboard-*.html`, `chronik-spec.html`)
|
||||
- OCR admin (`ocr-admin-spec.html`)
|
||||
|
||||
## How to Use
|
||||
|
||||
1. **Before implementing a feature**, check `specs/` for an existing specification.
|
||||
2. **When proposing a new architecture**, draft an ADR in `adr/` and discuss before coding.
|
||||
3. **When deploying**, follow `infrastructure/production-compose.md`.
|
||||
4. **Keep TODO files updated** — they serve as lightweight backlogs.
|
||||
|
||||
## Style Guide
|
||||
|
||||
`STYLEGUIDE.md` covers:
|
||||
- Code formatting and linting rules
|
||||
- Component naming conventions
|
||||
- Color palette and typography
|
||||
- Accessibility standards (WCAG 2.1 AA)
|
||||
|
||||
## Contributing
|
||||
|
||||
- ADRs should be sequential (`NNN-descriptive-name.md`).
|
||||
- Specs should be self-contained HTML files viewable in a browser.
|
||||
- Infrastructure docs should include copy-pasteable commands.
|
||||
**LLM reminder:** ADRs are sequential — use the next number after the highest existing one in `docs/adr/`. When making a significant architectural change (new service, data model change, technology swap), write a new ADR before implementing.
|
||||
|
||||
346
docs/DEPLOYMENT.md
Normal file
346
docs/DEPLOYMENT.md
Normal file
@@ -0,0 +1,346 @@
|
||||
<!-- Last reviewed: 2026-05-05 — reviewed at every milestone close -->
|
||||
|
||||
# Familienarchiv — Deployment Reference
|
||||
|
||||
> **If the app is down right now → jump to [§4 Logs](#4-logs--observability).**
|
||||
|
||||
This doc is the Day-1 checklist and operational reference. It links to the canonical infrastructure docs in `docs/infrastructure/` rather than duplicating them.
|
||||
|
||||
**Audience:** operator bringing up a fresh instance, or Successor-X debugging a live incident.
|
||||
|
||||
**Ownership:** project owner. Update this file in any PR that changes the container topology, env vars, or backup procedure.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Deployment topology](#1-deployment-topology)
|
||||
2. [Environment variables](#2-environment-variables)
|
||||
3. [Bootstrap from scratch](#3-bootstrap-from-scratch)
|
||||
4. [Logs + observability](#4-logs--observability)
|
||||
5. [Backup + recovery](#5-backup--recovery)
|
||||
6. [Common operational tasks](#6-common-operational-tasks)
|
||||
7. [Known limitations](#7-known-limitations)
|
||||
|
||||
---
|
||||
|
||||
## 1. Deployment topology
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
Browser -->|HTTPS| Caddy["Caddy (TLS termination)"]
|
||||
Caddy -->|HTTP :3000| Frontend["Web Frontend\nSvelteKit Node adapter"]
|
||||
Caddy -->|HTTP :8080| Backend["API Backend\nSpring Boot / Jetty :8080"]
|
||||
Backend -->|JDBC :5432| DB[(PostgreSQL 16)]
|
||||
Backend -->|S3 API :9000| MinIO[(MinIO)]
|
||||
Backend -->|HTTP :8000 internal| OCR["OCR Service\nPython FastAPI"]
|
||||
OCR -->|presigned URL| MinIO
|
||||
Caddy -->|SSE proxy_pass| Backend
|
||||
```
|
||||
|
||||
**Key facts:**
|
||||
- Caddy terminates TLS and reverse-proxies to frontend (`:3000`) and backend (`:8080`). The Caddyfile is committed at [`infra/caddy/Caddyfile`](../infra/caddy/Caddyfile) and is installed on the host as `/etc/caddy/Caddyfile` (symlink).
|
||||
- The host binds all docker-published ports to `127.0.0.1` only; Caddy is the sole external entry point.
|
||||
- The OCR service has **no published port** — reachable only on the internal Docker network from the backend.
|
||||
- SSE notifications transit Caddy (browser → Caddy → backend); the backend is never reachable directly from the public internet. The SvelteKit SSR layer is bypassed for SSE, but Caddy is not.
|
||||
- The Caddyfile responds `404` on `/actuator/*` (defense in depth). Internal monitoring scrapes the backend on the docker network, not through Caddy.
|
||||
- Production and staging cohabit on the same host via docker compose project names: `archiv-production` (ports 8080/3000) and `archiv-staging` (ports 8081/3001).
|
||||
|
||||
### OCR memory requirements
|
||||
|
||||
The OCR service requires significant RAM for model loading. The dev compose sets `mem_limit: 12g`.
|
||||
|
||||
| Production target | RAM | Recommended OCR limit | Notes |
|
||||
|---|---|---|---|
|
||||
| Hetzner CX42 | 16 GB | 12 GB | Recommended for OCR-enabled production |
|
||||
| Hetzner CX32 | 8 GB | 6 GB | Accept reduced batch sizes and slower throughput |
|
||||
| Hetzner CX22 | 4 GB | — | Disable the OCR service (`profiles: [ocr]`); run OCR on demand only |
|
||||
|
||||
A CX32 cannot honour the default `mem_limit: 12g` — set the `OCR_MEM_LIMIT=6g` env var (in `.env.production` / `.env.staging`, or as a Gitea secret consumed by the workflow) before deploying on a CX32. The prod compose interpolates this var with a 12g default.
|
||||
|
||||
### Dev vs production differences
|
||||
|
||||
| Concern | Dev (`docker-compose.yml`) | Prod (`docker-compose.prod.yml`) |
|
||||
|---|---|---|
|
||||
| MinIO image tag | `minio/minio:latest` | Pinned `minio/minio:RELEASE.…` |
|
||||
| Data persistence | Bind mounts `./data/postgres`, `./data/minio` | Named Docker volumes (`postgres-data`, `minio-data`) |
|
||||
| MinIO credentials for backend | Root user/password | Service account `archiv-app` with bucket-scoped rights |
|
||||
| Bucket creation | `create-buckets` helper | Same helper, plus service-account bootstrap on every up |
|
||||
| Spring profile | `dev,e2e` (Swagger + e2e overrides) | unset — base `application.yaml` is production-ready |
|
||||
| Mail | Mailpit (local catcher) | Real SMTP (production) / Mailpit via `profiles: [staging]` (staging) |
|
||||
| Frontend image | Dev server, `target: development`, port 5173 | Node adapter, `target: production`, port 3000 |
|
||||
| Host port binding | All published | Bound to `127.0.0.1` only; Caddy is the front door |
|
||||
| Deploy method | `docker compose up -d` (manual) | Gitea Actions: `nightly.yml` (staging, cron) and `release.yml` (production, on `v*` tag) — both use `up -d --wait` |
|
||||
|
||||
Full prod compose: [`docker-compose.prod.yml`](../docker-compose.prod.yml). Workflow files: [`.gitea/workflows/nightly.yml`](../.gitea/workflows/nightly.yml), [`.gitea/workflows/release.yml`](../.gitea/workflows/release.yml).
|
||||
|
||||
---
|
||||
|
||||
## 2. Environment variables
|
||||
|
||||
All vars are set in `.env` at the repo root (copy from `.env.example`). The backend resolves them via `application.yaml`; the Docker Compose file wires them into each container.
|
||||
|
||||
**Any var found in `docker-compose.yml` or `application*.yaml` that is not in this table is a blocking review comment on any PR that changes those files.**
|
||||
|
||||
### Backend
|
||||
|
||||
| Variable | Purpose | Default | Required? | Sensitive? |
|
||||
|---|---|---|---|---|
|
||||
| `SPRING_DATASOURCE_URL` | PostgreSQL JDBC URL | — | YES | — |
|
||||
| `SPRING_DATASOURCE_USERNAME` | DB username | — | YES | — |
|
||||
| `SPRING_DATASOURCE_PASSWORD` | DB password | — | YES | YES |
|
||||
| `S3_ENDPOINT` | MinIO / OBS endpoint URL | — | YES | — |
|
||||
| `S3_ACCESS_KEY` | MinIO access key (use service account, not root in prod) | — | YES | YES |
|
||||
| `S3_SECRET_KEY` | MinIO secret key | — | YES | YES |
|
||||
| `S3_BUCKET_NAME` | Target bucket name | — | YES | — |
|
||||
| `S3_REGION` | S3 region string | `us-east-1` | YES | — |
|
||||
| `APP_ADMIN_USERNAME` | Bootstrap admin username (⚠ not in .env.example) | `admin` | YES | — |
|
||||
| `APP_ADMIN_PASSWORD` | Bootstrap admin password (⚠ ships as `admin123`) | `admin123` | YES | YES |
|
||||
| `APP_BASE_URL` | Public-facing URL for email links | `http://localhost:3000` | YES (prod) | — |
|
||||
| `APP_OCR_BASE_URL` | Internal URL of the OCR service | — | YES | — |
|
||||
| `APP_OCR_TRAINING_TOKEN` | Secret token for OCR training endpoints | — | YES (prod) | YES |
|
||||
| `MAIL_HOST` | SMTP host | `mailpit` (dev) | YES (prod) | — |
|
||||
| `MAIL_PORT` | SMTP port | `1025` (dev) | YES (prod) | — |
|
||||
| `MAIL_USERNAME` | SMTP username | — | YES (prod) | YES |
|
||||
| `MAIL_PASSWORD` | SMTP password | — | YES (prod) | YES |
|
||||
| `APP_MAIL_FROM` | From address for outbound mail | `noreply@familienarchiv.local` | YES (prod) | — |
|
||||
| `MAIL_SMTP_AUTH` | SMTP auth enabled | `false` (dev) | YES (prod) | — |
|
||||
| `MAIL_STARTTLS_ENABLE` | STARTTLS enabled | `false` (dev) | YES (prod) | — |
|
||||
| `SPRING_PROFILES_ACTIVE` | Spring profile | `dev,e2e` (compose) | YES | — |
|
||||
|
||||
### PostgreSQL container
|
||||
|
||||
| Variable | Purpose | Default | Required? | Sensitive? |
|
||||
|---|---|---|---|---|
|
||||
| `POSTGRES_USER` | DB superuser | `archive_user` | YES | — |
|
||||
| `POSTGRES_PASSWORD` | DB password | `change-me` | YES | YES |
|
||||
| `POSTGRES_DB` | Database name | `family_archive_db` | YES | — |
|
||||
|
||||
### MinIO container
|
||||
|
||||
| Variable | Purpose | Default | Required? | Sensitive? |
|
||||
|---|---|---|---|---|
|
||||
| `MINIO_ROOT_USER` | MinIO root username (dev compose only — prod compose hardcodes `archiv`) | `minio_admin` | YES (dev) | — |
|
||||
| `MINIO_ROOT_PASSWORD` / `MINIO_PASSWORD` | MinIO root password. **Used only by the `mc admin` bootstrap in prod, never by the backend.** | `change-me` | YES | YES |
|
||||
| `MINIO_APP_PASSWORD` | Password for the `archiv-app` service account that the backend uses. Bucket-scoped via `readwrite` policy on `familienarchiv`. Bootstrapped by `create-buckets`. | — | YES (prod) | YES |
|
||||
| `MINIO_DEFAULT_BUCKETS` | Bucket name (dev compose only — prod compose hardcodes `familienarchiv`) | `archive-documents` | YES (dev) | — |
|
||||
|
||||
### OCR service
|
||||
|
||||
| Variable | Purpose | Default | Required? | Sensitive? |
|
||||
|---|---|---|---|---|
|
||||
| `TRAINING_TOKEN` | Guards `/train` and `/segtrain` endpoints (accepts file uploads) | — | YES (prod) | YES |
|
||||
| `ALLOWED_PDF_HOSTS` | SSRF protection — comma-separated list of allowed PDF source hosts. **Do not widen to `*`** | `minio,localhost,127.0.0.1` | YES | — |
|
||||
| `KRAKEN_MODEL_PATH` | Directory containing Kraken HTR models (populated by `download-kraken-models.sh`) | `/app/models/` | — | — |
|
||||
| `BLLA_MODEL_PATH` | Kraken baseline layout analysis model path | `/app/models/blla.mlmodel` | — | — |
|
||||
| `OCR_MEM_LIMIT` | Container memory cap for ocr-service in `docker-compose.prod.yml`. Set to `6g` on CX32 hosts; leave unset on CX42+ to use the 12g default | `12g` (prod compose default) | — | — |
|
||||
|
||||
---
|
||||
|
||||
## 3. Bootstrap from scratch
|
||||
|
||||
Production and staging deploy via Gitea Actions (`release.yml` on `v*` tag, `nightly.yml` on cron). The server itself only needs to host Caddy, Docker, and the runner — the workflows handle the rest.
|
||||
|
||||
### 3.1 Server one-time setup
|
||||
|
||||
```bash
|
||||
# Base hardening
|
||||
ufw default deny incoming && ufw allow 22/tcp && ufw allow 80/tcp && ufw allow 443/tcp && ufw enable
|
||||
# /etc/ssh/sshd_config: PasswordAuthentication no, PermitRootLogin no
|
||||
|
||||
# Install Caddy 2 (https://caddyserver.com/docs/install#debian-ubuntu-raspbian)
|
||||
apt install caddy
|
||||
|
||||
# Use the Caddyfile from the repo (replace path with the runner's clone target)
|
||||
ln -sf /opt/familienarchiv/infra/caddy/Caddyfile /etc/caddy/Caddyfile
|
||||
systemctl reload caddy
|
||||
|
||||
# fail2ban — protect /api/auth/login from credential stuffing.
|
||||
# Jail watches the Caddy JSON access log for 401 responses on
|
||||
# /api/auth/login. The jail (maxretry=10 / findtime=10m / bantime=30m)
|
||||
# and filter are committed under infra/fail2ban/ — symlink them in:
|
||||
apt install fail2ban
|
||||
ln -sf /opt/familienarchiv/infra/fail2ban/jail.d/familienarchiv.conf \
|
||||
/etc/fail2ban/jail.d/familienarchiv.conf
|
||||
ln -sf /opt/familienarchiv/infra/fail2ban/filter.d/familienarchiv-auth.conf \
|
||||
/etc/fail2ban/filter.d/familienarchiv-auth.conf
|
||||
systemctl reload fail2ban
|
||||
# Verify after first deploy with:
|
||||
# fail2ban-client status familienarchiv-auth
|
||||
# fail2ban-regex /var/log/caddy/access.log familienarchiv-auth
|
||||
|
||||
# Tailscale — used by the backup pipeline to reach heim-nas (follow-up issue)
|
||||
curl -fsSL https://tailscale.com/install.sh | sh && tailscale up
|
||||
|
||||
# Self-hosted Gitea runner — register against the repo with a runner token.
|
||||
# This runner is assumed single-tenant: the deploy workflows write .env.*
|
||||
# files to disk during execution (cleaned up unconditionally on completion).
|
||||
# A multi-tenant runner would need to switch to stdin-piped env files.
|
||||
# (See https://docs.gitea.com/usage/actions/quickstart for the register step.)
|
||||
```
|
||||
|
||||
### 3.2 DNS records
|
||||
|
||||
```
|
||||
archiv.raddatz.cloud A <server IP>
|
||||
staging.raddatz.cloud A <server IP>
|
||||
git.raddatz.cloud A <server IP>
|
||||
```
|
||||
|
||||
### 3.3 Gitea secrets (Repo → Settings → Actions → Secrets)
|
||||
|
||||
| Secret | Used by | Notes |
|
||||
|---|---|---|
|
||||
| `PROD_POSTGRES_PASSWORD` | release.yml | strong unique password |
|
||||
| `PROD_MINIO_PASSWORD` | release.yml | MinIO root password; used only at bootstrap |
|
||||
| `PROD_MINIO_APP_PASSWORD` | release.yml | application service-account password |
|
||||
| `PROD_OCR_TRAINING_TOKEN` | release.yml | `python3 -c "import secrets; print(secrets.token_hex(32))"` |
|
||||
| `PROD_APP_ADMIN_USERNAME` | release.yml | e.g. `admin@archiv.raddatz.cloud` |
|
||||
| `PROD_APP_ADMIN_PASSWORD` | release.yml | **⚠ locked permanently on first deploy** — see §3.5 |
|
||||
| `STAGING_POSTGRES_PASSWORD` | nightly.yml | different from prod |
|
||||
| `STAGING_MINIO_PASSWORD` | nightly.yml | different from prod |
|
||||
| `STAGING_MINIO_APP_PASSWORD` | nightly.yml | different from prod |
|
||||
| `STAGING_OCR_TRAINING_TOKEN` | nightly.yml | different from prod |
|
||||
| `STAGING_APP_ADMIN_USERNAME` | nightly.yml | e.g. `admin@staging.raddatz.cloud` |
|
||||
| `STAGING_APP_ADMIN_PASSWORD` | nightly.yml | locked on first staging deploy |
|
||||
| `MAIL_HOST` | release.yml | SMTP relay hostname (prod only) |
|
||||
| `MAIL_PORT` | release.yml | typically `587` |
|
||||
| `MAIL_USERNAME` | release.yml | SMTP user |
|
||||
| `MAIL_PASSWORD` | release.yml | SMTP password |
|
||||
|
||||
### 3.4 First deploy
|
||||
|
||||
```bash
|
||||
# 1. Trigger nightly.yml manually (Repo → Actions → nightly → "Run workflow")
|
||||
# Expected: docker compose up -d --wait succeeds for archiv-staging, then
|
||||
# the workflow's "Smoke test deployed environment" step asserts:
|
||||
# - https://staging.raddatz.cloud/login returns 200
|
||||
# - HSTS header is present
|
||||
# - /actuator/health returns 404 (defense-in-depth check)
|
||||
# 2. (Optional) Re-verify manually
|
||||
curl -I https://staging.raddatz.cloud/
|
||||
# Expected: 200 (login page) with HSTS + X-Content-Type-Options headers
|
||||
# 3. When staging looks healthy, push a v* tag to trigger release.yml
|
||||
git tag v1.0.0 && git push origin v1.0.0
|
||||
```
|
||||
|
||||
### 3.5 ⚠ Admin password is locked on first deploy
|
||||
|
||||
`UserDataInitializer` creates the admin user **only if the email does not exist**. The first successful deploy persists the admin password to the database. Changing `PROD_APP_ADMIN_PASSWORD` in Gitea secrets after that point has **no effect** — the secret is only consulted when the row is missing.
|
||||
|
||||
Before the first deploy: rotate `PROD_APP_ADMIN_PASSWORD` to a strong value. After the first deploy: change the admin password via the in-app account settings, not via the Gitea secret.
|
||||
|
||||
---
|
||||
|
||||
## 4. Logs + observability
|
||||
|
||||
### First-response commands
|
||||
|
||||
```bash
|
||||
# Stream backend logs (most useful first)
|
||||
docker compose logs --follow --tail=100 backend
|
||||
|
||||
# Stream all services
|
||||
docker compose logs --follow
|
||||
|
||||
# Single snapshot
|
||||
docker compose logs --tail=200 <service>
|
||||
# services: frontend, backend, db, minio, ocr-service
|
||||
```
|
||||
|
||||
### Log locations
|
||||
|
||||
- **Backend application log**: stdout (captured by Docker). Access inside the container at `/app/logs/` via `docker exec`.
|
||||
- **Spring Actuator health**: `http://localhost:8080/actuator/health` (internal only in prod — port 8081 for Prometheus scraping)
|
||||
- **Prometheus scraping**: management port 8081, path `/actuator/prometheus`. Internal only; Caddy blocks `/actuator/*` externally.
|
||||
|
||||
### Future observability
|
||||
|
||||
Phase 7 of the Production v1 milestone adds Prometheus + Loki + Grafana. No monitoring infrastructure is in place yet.
|
||||
|
||||
---
|
||||
|
||||
## 5. Backup + recovery
|
||||
|
||||
### Current state — no automated backup
|
||||
|
||||
No automated backup is configured. Manual procedure for a point-in-time backup:
|
||||
|
||||
```bash
|
||||
# PostgreSQL dump
|
||||
docker exec archive-db pg_dump -U ${POSTGRES_USER} ${POSTGRES_DB} > backup-$(date +%Y%m%d).sql
|
||||
|
||||
# MinIO data (bind-mounted in dev)
|
||||
# Copy ./data/minio/ to external storage
|
||||
```
|
||||
|
||||
Restoration:
|
||||
```bash
|
||||
# Restore Postgres
|
||||
docker exec -i archive-db psql -U ${POSTGRES_USER} ${POSTGRES_DB} < backup-YYYYMMDD.sql
|
||||
```
|
||||
|
||||
### Planned — phase 5 of Production v1 milestone
|
||||
|
||||
Automated backup (nightly `pg_dump` + MinIO `mc mirror` over Tailscale to `heim-nas`) is a follow-up issue. Until that ships: **manual backups are the only recovery option.**
|
||||
|
||||
### Rollback
|
||||
|
||||
Each release tag corresponds to a docker image tag on the host daemon (built via DooD; no registry). Rolling back to a previous tag is one command:
|
||||
|
||||
```bash
|
||||
TAG=v1.0.0 docker compose \
|
||||
-f docker-compose.prod.yml \
|
||||
-p archiv-production \
|
||||
--env-file /opt/familienarchiv/.env.production \
|
||||
up -d --wait --remove-orphans
|
||||
```
|
||||
|
||||
If the rollback target image is no longer present on the host (host disk pruned, etc.), re-trigger `release.yml` for that tag from Gitea Actions UI — it rebuilds and redeploys.
|
||||
|
||||
**Flyway migrations are not auto-rolled-back.** If a release contained a destructive migration (drop column, rename table), a tag rollback brings the schema back to a previous app version but the data shape has already changed. For breaking schema changes, prefer a forward-only fix.
|
||||
|
||||
---
|
||||
|
||||
## 6. Common operational tasks
|
||||
|
||||
### Reset dev database (truncates data, keeps schema)
|
||||
|
||||
```bash
|
||||
bash scripts/reset-db.sh
|
||||
```
|
||||
|
||||
> Truncates all data but does **not** drop the schema or re-run Flyway. Use for E2E test resets, not full reinstalls.
|
||||
> ⚠️ Script hardcodes `DB_USER=archive_user` and `DB_NAME=family_archive_db` — if you customised these in `.env`, edit the script accordingly.
|
||||
|
||||
### Rebuild frontend container (clears node_modules volume)
|
||||
|
||||
```bash
|
||||
bash scripts/rebuild-frontend.sh
|
||||
```
|
||||
|
||||
> Assumes the Docker Compose volume is named `familienarchiv_frontend_node_modules`. If your project directory is not named `familienarchiv`, edit line 16 of the script.
|
||||
|
||||
### Download Kraken OCR models
|
||||
|
||||
```bash
|
||||
bash scripts/download-kraken-models.sh
|
||||
```
|
||||
|
||||
> Downloads the Kurrent/Sütterlin HTR models. Run once after a fresh clone or when models are updated.
|
||||
|
||||
### Trigger a mass import (Excel/ODS)
|
||||
|
||||
1. Place the import file in the `import/` bind mount on the backend container.
|
||||
2. Call `POST /api/admin/trigger-import` (requires `ADMIN` permission).
|
||||
3. The import runs asynchronously — poll `GET /api/admin/import-status` or watch backend logs.
|
||||
|
||||
---
|
||||
|
||||
## 7. Known limitations
|
||||
|
||||
| Limitation | Reason | Reference |
|
||||
|---|---|---|
|
||||
| **Single-node OCR service** | The two required OCR engines (Surya + Kraken) exist only in the Python ecosystem; horizontal scaling would require a job queue not currently implemented | [ADR-001](adr/001-ocr-python-microservice.md) |
|
||||
| **No multi-tenancy** | Designed as a single-family private archive; all authenticated users share the same document space | Deliberate scope decision (family-only product frame) |
|
||||
| **No multi-region** | Single PostgreSQL + MinIO instance; no replication or failover | Deliberate scope decision |
|
||||
| **Max upload size** | 50 MB per file (500 MB per request for multi-file) | Configurable in `application.yaml` (`spring.servlet.multipart`) |
|
||||
| **No automated backup** | Phase 5 of Production v1 milestone is not yet implemented | See §5 above |
|
||||
123
docs/GLOSSARY.md
Normal file
123
docs/GLOSSARY.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Familienarchiv — Glossary
|
||||
|
||||
Domain-specific and overloaded terms used in this codebase.
|
||||
Each entry: **Term** — definition (≤ 2 sentences). Where two terms are easily confused, a _Not to be confused with_ note follows.
|
||||
|
||||
For architecture context see [`docs/architecture/c4-diagrams.md`](architecture/c4-diagrams.md).
|
||||
For domain package structure see [`docs/ARCHITECTURE.md`](ARCHITECTURE.md) _(coming: DOC-2)_.
|
||||
|
||||
---
|
||||
|
||||
## Identity Terms
|
||||
|
||||
**AppUser** (`AppUser`) — a real person who can log into the system (a family member or administrator). `AppUser` records carry login credentials, group memberships, and notification history.
|
||||
_Not to be confused with [Person](#person-person)_ — an AppUser is never recorded as a document sender, receiver, or historical individual.
|
||||
|
||||
**Reader** — an `AppUser` whose effective permissions include `READ_ALL` but neither `WRITE_ALL` nor `ANNOTATE_ALL`. Readers see a dedicated dashboard (`isReader = !canWrite && !canAnnotate`) focused on browsing documents, persons, and stories rather than contribution tasks. A user who also holds `BLOG_WRITE` is still classified as a Reader and additionally sees a drafts module.
|
||||
_Not to be confused with [AppUser](#appuser-appuser)_ — Reader is a permission-derived role, not an entity.
|
||||
|
||||
**Permission** — a discrete capability string assigned to a `UserGroup` (e.g. `READ_ALL`, `WRITE_ALL`, `ADMIN`, `ADMIN_USER`, `ADMIN_TAG`, `ADMIN_PERMISSION`). Enforced via the `@RequirePermission` AOP annotation on controller methods, checked at runtime by `PermissionAspect`; not via Spring Security's `@PreAuthorize`.
|
||||
|
||||
**Person** (`Person`) — a historical individual in the family archive (sender, receiver of letters, person mentioned in transcriptions). NEVER has a login account and NEVER appears as an `AppUser`.
|
||||
_Not to be confused with [AppUser](#appuser-appuser)_ — `Person` is a historical record; `AppUser` is someone who can log in today.
|
||||
|
||||
**PersonNameAlias** (`PersonNameAlias`) — an alternate or historical name form associated with a `Person` (e.g. maiden name, nickname, abbreviated form). Used to locate `Person` records during mass import via `PersonNameAliasType`.
|
||||
|
||||
**UserGroup** (`UserGroup`) — a named permission bundle assigned to one or more `AppUser`s. A user's effective permissions are the union of all permissions across all groups they belong to.
|
||||
|
||||
---
|
||||
|
||||
## Document-Related Terms
|
||||
|
||||
**Annotation** (`DocumentAnnotation`) — a free-form polygon or shape drawn over a document page image to highlight a region of interest. Always scoped to a specific page of a `Document`; stored as a polygon (JSONB).
|
||||
_See also [TranscriptionBlock](#transcriptionblock-transcriptionblock)._
|
||||
|
||||
**Comment** (`DocumentComment`, table `document_comments`) — a threaded discussion message attached to a `Document`. Always scoped to a `Document`; optionally further contextualized by a specific `DocumentAnnotation` or `TranscriptionBlock`.
|
||||
|
||||
**Document** (`Document`) — a single archival item (letter, postcard, photograph) with a file stored in MinIO/S3 and associated metadata (sender, receivers, date, tags, transcription blocks).
|
||||
|
||||
**DocumentVersion** (`DocumentVersion`) — an append-only snapshot of a `Document`'s metadata at a point in time. Append-only by convention; no consumer-facing create or update endpoint exists. The entity uses Lombok `@Data` (which generates setters), so immutability is enforced by application convention, not at the Java level.
|
||||
|
||||
**Tag** (`Tag`) — a hierarchical category that can be applied to `Document`s. Tags are self-referencing via a `parent_id` foreign key, forming a tree structure.
|
||||
|
||||
**TranscriptionBlock** (`TranscriptionBlock`) — a paragraph-level segment of a `Document`'s transcribed text, with a polygon region (stored as JSONB) identifying its position on the page. One document can have many blocks across multiple pages.
|
||||
_See also [Annotation](#annotation-documentannotation)._
|
||||
|
||||
---
|
||||
|
||||
## Workflow Terms
|
||||
|
||||
**DocumentStatus lifecycle** — the ordered states a `Document` moves through:
|
||||
`PLACEHOLDER → UPLOADED → TRANSCRIBED → REVIEWED → ARCHIVED`
|
||||
- `PLACEHOLDER`: created during mass import; no file attached yet.
|
||||
- `UPLOADED`: a file has been stored in MinIO/S3.
|
||||
- `TRANSCRIBED`: all transcription blocks have been marked done.
|
||||
- `REVIEWED`: a reviewer has approved the transcription.
|
||||
- `ARCHIVED`: the document is finalized and read-only.
|
||||
|
||||
**Mass import** — an asynchronous batch process (`MassImportService`) that reads an Excel or ODS file and creates `Person`s, `Tag`s, and `PLACEHOLDER` `Document`s in one shot. Only one import can run at a time (`IMPORT_ALREADY_RUNNING` error if attempted concurrently).
|
||||
|
||||
**Transcription queue** — the set of `Document`s and `TranscriptionBlock`s awaiting work, computed on-the-fly from `Document`/`Block` status. Three views: segmentation queue, transcription queue, ready-to-read queue. NOT a persistent entity — no `transcription_queues` table exists.
|
||||
_See also [DocumentStatus lifecycle](#documentstatus-lifecycle)._
|
||||
|
||||
---
|
||||
|
||||
## OCR-Specific Terms
|
||||
|
||||
**HTR** — Handwritten Text Recognition. Recognizes cursive and historical handwriting (contrasted with OCR for printed/typewritten text). The primary mode used for letters in this archive.
|
||||
|
||||
**Kurrent** — Old German cursive handwriting style, the primary historical script appearing in letters from the 1899–1950 period covered by this archive.
|
||||
|
||||
**OCR** — Optical Character Recognition. Recognizes printed or typewritten text. Used for typed documents; HTR is used for handwritten ones.
|
||||
|
||||
**OcrJob** (`OcrJob`, table `ocr_jobs`) — a first-class persistent entity tracking a batch OCR run across one or more documents (`OcrJobDocument`, table `ocr_job_documents`). Distinct from the concept of "running OCR on a single document." Lifecycle: `PENDING → RUNNING → DONE / FAILED` (see `OcrJobStatus`).
|
||||
|
||||
**SenderModel** (`SenderModel`, table `sender_models`) — a fine-tuned Kraken HTR model trained on a specific historical correspondent's handwriting. Both an OCR-service concept (the model weights) and a persistent entity linking a `Person` to the path of their trained model file.
|
||||
|
||||
**Sütterlin** — A specific standardized style of Kurrent taught in German schools from 1915 to 1941.
|
||||
|
||||
---
|
||||
|
||||
## Other Domain Terms
|
||||
|
||||
**Aktivität / Aktivitäten** `[user-facing]` — the family activity feed accessible at `/aktivitaeten`. Shows recent documents, transcriptions, comments, and Geschichten as a chronological timeline.
|
||||
_See also [Chronik](#chronik-internal)._
|
||||
|
||||
**Briefwechsel** `[user-facing]` — the bilateral conversation timeline between two `Person`s, derived from `Document` sender/receiver relationships. Accessible at `/briefwechsel`. Not a persistent entity — data is computed from existing `Document` records.
|
||||
_See also [Derived domain](#derived-domain)._
|
||||
|
||||
**Chronik** `[internal]` — the conceptual and code-level name for the unified activity feed (per ADR-003 `003-chronik-unified-activity-feed.md`). Used in code, architecture documents, and ADRs. The user-facing label for the same concept is [Aktivität](#aktivität--aktivitäten-user-facing).
|
||||
|
||||
**Geschichte** (`Geschichte`) `[user-facing]` — a narrative story or article published in the archive, linking `Person`s and `Document`s. Lifecycle: `DRAFT → PUBLISHED` (see `GeschichteStatus`). DRAFT stories are hidden from users without the `BLOG_WRITE` permission.
|
||||
|
||||
**Notification** (`Notification`) — an in-app message delivered to an `AppUser`. No email or SMS delivery exists today. Delivered via Server-Sent Events (`SseEmitterRegistry`) and persisted in the `notifications` table.
|
||||
|
||||
**Audit log** (`AuditLog`, table `audit_log`) — an append-only event store recording domain-level activity (document edits, user actions, etc.). Append-only by application convention; a `REVOKE UPDATE, DELETE` is attempted at the DB layer (see migrations V46, V47) but is a no-op if the application role is the table owner in PostgreSQL. Do not rely on DB-enforced immutability — the constraint is application-layer only.
|
||||
|
||||
---
|
||||
|
||||
## Architectural Terms
|
||||
|
||||
**Cross-cutting** — code that lives in `lib/shared/` (frontend) or cross-domain packages (backend) because it has no entity of its own, no user-facing CRUD, AND is used by two or more domains OR is framework infrastructure (error handling, API client, i18n utilities).
|
||||
|
||||
**Derived domain** — a Tier-2 frontend domain that has its own UI but no backend entities of its own. Data is computed from Tier-1 domain records. Current derived domains: `conversation` (from `Document` sender/receivers) and `activity` (from audit, notifications, document events).
|
||||
_See also [Briefwechsel](#briefwechsel-user-facing)._
|
||||
|
||||
**Domain** — a Tier-1 bounded context with its own entities, controller, service, repository, and DTOs. Backend domains: `document`, `person`, `tag`, `user`, `geschichte`, `notification`, `ocr`, `audit`, `dashboard`. Frontend domains mirror this structure under `src/lib/`.
|
||||
|
||||
---
|
||||
|
||||
## Infrastructure Terms
|
||||
|
||||
**archiv-app** — the bucket-scoped MinIO service account the backend uses to read and write the `familienarchiv` bucket. Distinct from the MinIO root account (`archiv`, used only by the bootstrap container for admin operations). Defined and provisioned in [`infra/minio/bootstrap.sh`](../infra/minio/bootstrap.sh) and consumed by the backend as `S3_ACCESS_KEY` in [`docker-compose.prod.yml`](../docker-compose.prod.yml). The attached `archiv-app-policy` grants `s3:GetObject/PutObject/DeleteObject` on `familienarchiv/*` and `s3:ListBucket/GetBucketLocation` on the bucket only — not the built-in `readwrite` policy which would grant `s3:*` on all buckets.
|
||||
_See also [ADR-010 — MinIO stays self-hosted, not Hetzner OBS](./adr/010-minio-self-hosted-not-hetzner-obs.md)._
|
||||
|
||||
---
|
||||
|
||||
## Pending Terms
|
||||
|
||||
_Terms flagged as potentially ambiguous that have not yet been formally defined here. Add an entry above and remove it from this list when resolved._
|
||||
|
||||
- Terms surfaced by Epic 1 audit findings (#388–#392) — review audit reports under `docs/audits/` when available and add any term flagged as ambiguous.
|
||||
- `OcrBatchService` vs `OcrAsyncRunner` — both handle async OCR orchestration; their division of responsibility should be clarified here.
|
||||
- `Stammbaum` — the genealogy tree view; relationship to `PersonRelationship` entity.
|
||||
85
docs/README.md
Normal file
85
docs/README.md
Normal file
@@ -0,0 +1,85 @@
|
||||
# docs/
|
||||
|
||||
Project documentation organised into four categories: architecture decision records (ADRs), system architecture diagrams, infrastructure runbooks, and detailed UI/UX specifications.
|
||||
|
||||
## Folder structure
|
||||
|
||||
```
|
||||
docs/
|
||||
├── adr/ # Architecture Decision Records
|
||||
├── architecture/ # C4 model diagrams and system architecture docs
|
||||
├── infrastructure/ # Deployment, CI/CD, and ops guides
|
||||
├── specs/ # UI/UX feature specifications (HTML)
|
||||
├── ARCHITECTURE.md # Human-readable architecture overview (DOC-2)
|
||||
├── DEPLOYMENT.md # Day-1 checklist and operational reference (DOC-5)
|
||||
├── GLOSSARY.md # Domain terminology (DOC-3)
|
||||
├── security-guide.md # Security policies and hardening guide
|
||||
└── STYLEGUIDE.md # Coding and design style guide
|
||||
```
|
||||
|
||||
## ADR (`adr/`)
|
||||
|
||||
Architecture Decision Records capture major technical decisions and their rationale.
|
||||
|
||||
| ADR | Title | Status |
|
||||
| -------------------------------------- | ------------------------------------ | -------- |
|
||||
| `001-ocr-python-microservice.md` | OCR as a separate Python container | Accepted |
|
||||
| `002-polygon-jsonb-storage.md` | Polygon coordinates in JSONB columns | Accepted |
|
||||
| `003-chronik-unified-activity-feed.md` | Unified activity feed (Chronik) | Accepted |
|
||||
|
||||
When making a significant architectural change (new service, data model change, technology swap), write a new ADR:
|
||||
|
||||
- **Status** (Proposed / Accepted / Deprecated / Superseded)
|
||||
- **Context** (forces at play)
|
||||
- **Decision** (what we decided)
|
||||
- **Consequences** (trade-offs)
|
||||
- **Alternatives Considered** (table format)
|
||||
|
||||
ADRs are sequential (`NNN-descriptive-name.md`). Do not reuse numbers.
|
||||
|
||||
## Architecture (`architecture/`)
|
||||
|
||||
Contains C4 model diagrams describing the system at different zoom levels:
|
||||
|
||||
- **Context diagram** — How Familienarchiv fits into the user and system ecosystem
|
||||
- **Container diagram** — The high-level technology choices (Spring Boot, SvelteKit, PostgreSQL, MinIO, OCR service)
|
||||
- **Component diagram** — Major structural components within the backend
|
||||
|
||||
Written in Markdown with embedded Mermaid diagrams (`c4-diagrams.md`). Gitea renders these automatically.
|
||||
|
||||
For the human-readable architecture narrative, see [`docs/ARCHITECTURE.md`](ARCHITECTURE.md).
|
||||
|
||||
## Infrastructure (`infrastructure/`)
|
||||
|
||||
Operational documentation for running Familienarchiv in production and CI.
|
||||
|
||||
| Document | Purpose |
|
||||
| -------------------------- | ---------------------------------------------------- |
|
||||
| `ci-gitea.md` | Gitea CI/CD pipeline configuration |
|
||||
| `production-compose.md` | Production Docker Compose setup and VPS provisioning |
|
||||
| `s3-migration.md` | Migrating documents between S3 buckets |
|
||||
| `self-hosted-catalogue.md` | Self-hosted software catalogue |
|
||||
|
||||
For the day-1 deployment checklist, see [`docs/DEPLOYMENT.md`](DEPLOYMENT.md).
|
||||
|
||||
## Specs (`specs/`)
|
||||
|
||||
High-fidelity UI/UX specifications written as standalone HTML files. These are design documents describing exact layout, interactions, and responsive behavior before implementation.
|
||||
|
||||
Each spec typically includes:
|
||||
|
||||
- Visual mockups with CSS-in-HTML styling
|
||||
- Interaction flows and state transitions
|
||||
- Responsive breakpoint behavior
|
||||
- Accessibility requirements
|
||||
|
||||
Before implementing a feature, check `specs/` for an existing specification.
|
||||
|
||||
## Style Guide
|
||||
|
||||
[`docs/STYLEGUIDE.md`](STYLEGUIDE.md) covers:
|
||||
|
||||
- Code formatting and linting rules
|
||||
- Component naming conventions
|
||||
- Color palette and typography
|
||||
- Accessibility standards (WCAG 2.1 AA)
|
||||
52
docs/adr/007-reader-dashboard-permission-discriminant.md
Normal file
52
docs/adr/007-reader-dashboard-permission-discriminant.md
Normal file
@@ -0,0 +1,52 @@
|
||||
# ADR-007: Reader-dashboard permission discriminant
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
Issue #447 introduced two distinct user cohorts on the home page:
|
||||
|
||||
- **Contributors** — transcribe, annotate, upload. The existing `MissionControlStrip`, `EnrichmentBlock`, `DashboardResumeStrip`, `DashboardFamilyPulse`, `DashboardActivityFeed`, and `DropZone` are aimed at them.
|
||||
- **Readers** — browse and consume finished content. Older, less technical, on smaller devices. The contribution-focused widgets are noise to them.
|
||||
|
||||
`AppUser` permissions are already derived in `+layout.server.ts` and exposed via `$page.data` as `canWrite`, `canAnnotate`, and `canBlogWrite`. The home route needs a single boolean to switch its layout and its data fetch set, and that boolean has to be load-bearing — every future permission introduced has to be classified against it.
|
||||
|
||||
## Decision
|
||||
|
||||
```ts
|
||||
const isReader = !canWrite && !canAnnotate;
|
||||
```
|
||||
|
||||
Computed at the start of `+page.server.ts` `load()`. When true, the loader fetches a lean reader set (stats / top-4 persons / recent docs / recent stories — and drafts when `canBlogWrite`) via `Promise.allSettled` and returns a discriminated-union shape the page distinguishes via `data.isReader`.
|
||||
|
||||
`BLOG_WRITE` is **not** part of the discriminant. A `READ_ALL + BLOG_WRITE` user is still a reader and additionally sees the `ReaderDraftsModule`. Story writers are conceptually closer to readers than to transcribers: they consume the archive, occasionally publish narrative on top of it, and have no business with the transcription queue.
|
||||
|
||||
A `BLOG_WRITE`-only user (no `READ_ALL`) is also classified as a reader by this formula. Because every reader API requires `READ_ALL`, all four content tiles degrade to empty via `Promise.allSettled`. They see the empty reader shell plus the drafts module — acceptable behaviour, since this permission combination is degenerate by configuration. Documented in `docs/GLOSSARY.md`.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
| Alternative | Why rejected |
|
||||
|---|---|
|
||||
| New `/reader-home` route with a server-side redirect from `/` | Two routes that mostly answer the same product question (home page). Bookmarks split, breadcrumbs split, header `home` link has to know which to use. The conditional-render keeps a single canonical URL and lets the auth state drive the layout, matching how `canWrite` already gates the upload zone in the contributor branch. |
|
||||
| `AppUser.dashboardVariant` column persisted in the DB | Permissions already encode the relevant signal; a separate field has to be kept in sync with permission changes. Drift is a feature foot-gun: a user gets `WRITE_ALL` granted but their `dashboardVariant` field still says `reader` and they keep seeing the wrong UI. |
|
||||
| Middleware/handle hook redirecting based on permissions | Same logical issue as the dedicated route plus a network round-trip on every dashboard hit. The discriminant runs once inside the same `load()` that's already fetching the user. |
|
||||
| `isReader = !canWrite && !canAnnotate && !canBlogWrite` (exclude `BLOG_WRITE` from readers) | Treats blog writers as contributors. They would land on the `MissionControlStrip` they cannot meaningfully use (no `WRITE_ALL`, no `ANNOTATE_ALL`) and would have to scroll past the transcription queue to find their own drafts. The reader shell + drafts module fits their actual workflow. |
|
||||
|
||||
## Consequences
|
||||
|
||||
**Easier:**
|
||||
- Reader and contributor views share one canonical home URL — no redirect, no routing fork.
|
||||
- Adding a new content tile to the reader dashboard is a single-file change inside the `if (isReader)` branch of `load()` plus a new component import in `+page.svelte`.
|
||||
- Backend `@RequirePermission(READ_ALL)` on every reader API call remains the load-bearing security gate. `isReader` is purely a UX flag — manipulating it client-side serves a different layout to the same authenticated user with the same permissions.
|
||||
|
||||
**Harder:**
|
||||
- Every future `Permission` value has to be explicitly classified against this formula. Adding a permission that grants contribution rights but not `WRITE_ALL`/`ANNOTATE_ALL` would silently leave its bearers on the reader dashboard. Mitigation: keep this ADR linked from `+page.server.ts` and from the `Permission` enum's Javadoc.
|
||||
- The discriminated-union return type of `load()` (`{isReader: true} | {isReader: false}`) requires every consumer to narrow on `data.isReader` before accessing branch-specific fields. The current `+page.svelte` already does this with the top-level `{#if data.isReader}`; new consumers of the home loader must follow suit.
|
||||
|
||||
## Future Direction
|
||||
|
||||
If a third cohort emerges (e.g. an admin home with system-health tiles), promote the discriminant to a tagged-union: `dashboard: 'reader' | 'contributor' | 'admin'`. The discriminant computation moves from `+page.server.ts` into a small helper in `lib/shared/server/`, callable from any route that needs the same classification (e.g. a future `/welcome` onboarding flow).
|
||||
|
||||
If `BLOG_WRITE`-only access becomes a real product mode (rather than the degenerate combination it is today), revisit whether the formula should add a `canRead` precondition: `isReader = canRead && !canWrite && !canAnnotate`.
|
||||
68
docs/adr/008-fts-sql-pagination.md
Normal file
68
docs/adr/008-fts-sql-pagination.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# ADR-008: SQL-level pagination for full-text search via window-function CTE
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
`DocumentRepository.findAllMatchingIdsByFts` (formerly `findRankedIdsByFts`) returns all matching document IDs for a FTS query. `DocumentService.searchDocuments` then paginates in memory on the RELEVANCE sort path.
|
||||
|
||||
A pre-production audit against 1,520 documents measured:
|
||||
|
||||
```
|
||||
rows_per_call: 911 / call (query: "walter")
|
||||
```
|
||||
|
||||
At current scale this is acceptable — 911 UUIDs ≈ 14 KB, ms-level DB time. At 100 K+ documents two failure modes emerge:
|
||||
|
||||
1. **Memory**: a broad query returns ~60 K UUIDs ≈ 1 MB per request, multiplied by concurrent users.
|
||||
2. **Latency**: the `LATERAL` join does work proportional to match-set size; at 60 K matches the FTS step alone exceeds 100 ms per query.
|
||||
|
||||
Tracked as finding **F-31 (High)** in the pre-production architectural review.
|
||||
|
||||
## Decision
|
||||
|
||||
Push pagination and rank ordering into SQL for the RELEVANCE sort path when no non-text filters are active (pure full-text search):
|
||||
|
||||
```sql
|
||||
WITH q AS (
|
||||
SELECT CASE WHEN websearch_to_tsquery('german', :query)::text <> ''
|
||||
THEN to_tsquery('simple', regexp_replace(
|
||||
websearch_to_tsquery('german', :query)::text,
|
||||
'''([^'']+)''', '''\\1'':*', 'g'))
|
||||
END AS pq
|
||||
), matches AS (
|
||||
SELECT d.id, ts_rank(d.search_vector, q.pq) AS rank
|
||||
FROM documents d, q
|
||||
WHERE d.search_vector @@ q.pq
|
||||
)
|
||||
SELECT id, rank, COUNT(*) OVER () AS total
|
||||
FROM matches
|
||||
ORDER BY rank DESC, id
|
||||
OFFSET :offset LIMIT :limit
|
||||
```
|
||||
|
||||
`COUNT(*) OVER ()` returns the full match count alongside each page row in a single round-trip — no separate count query needed.
|
||||
|
||||
`rows_per_call` for the FTS query drops from match-set size (911) to page size (≤ 50).
|
||||
|
||||
When non-text filters (date range, sender, receiver, tags, status) are also active, the existing path is preserved: `findAllMatchingIdsByFts` returns all ranked IDs, which are passed as an `IN` clause to the JPA Specification, and `totalElements` comes from the JPA `Page.getTotalElements()`. This keeps the count accurate across the combined filter set.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
**1. Two-query approach (separate COUNT + paged SELECT)**
|
||||
Correct, but doubles round-trips. The window function achieves the same result in one query.
|
||||
|
||||
**2. Capped result set with a user-visible warning**
|
||||
Return at most N results (e.g. 500) and show "showing top 500 of many results". Simpler, but degrades UX for broad queries and doesn't reduce latency proportionally (still scans N rows).
|
||||
|
||||
**3. Full SQL rewrite combining FTS + JPA Specification filters**
|
||||
Possible via a native query that embeds all filter predicates. Eliminates the in-memory SENDER/RECEIVER sort paths and the two-phase approach. High complexity, tight coupling to schema details, loses type-safe JPA Specification composition. Deferred to a future refactor if scale demands it.
|
||||
|
||||
## Consequences
|
||||
|
||||
- **`rows_per_call` for pure-text FTS searches drops to ≤ page size** — the primary metric.
|
||||
- **SENDER and RECEIVER sort paths stay in-memory** for combined text+filter queries. For pure-text queries with SENDER/RECEIVER sort, the current approach (fetch all matched IDs, build spec, load all matched entities, sort in-memory) still runs. This is acceptable while the archive stays under ~10 K documents.
|
||||
- **RELEVANCE sort with text+filters still loads the full filtered entity set in-memory.** The filtered set is typically much smaller than the raw FTS match set, so the cost is bounded by filter selectivity, not total match count.
|
||||
- **`findAllMatchingIdsByFts` is retained** for: (a) the bulk-edit "select all" fast path (`findIdsForFilter`), (b) the document density chart (`getDensity`), and (c) the SENDER/RECEIVER in-memory sort paths.
|
||||
50
docs/adr/009-standalone-compose-not-overlay.md
Normal file
50
docs/adr/009-standalone-compose-not-overlay.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# ADR-009: Standalone `docker-compose.prod.yml`, not an overlay
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
The repository's `docker-compose.yml` is a development stack: every service is built locally, ports are exposed on `0.0.0.0` for dev tooling, the frontend runs `npm run dev` with hot-reload, the backend is `spring-boot:run` with the dev profile, and there is no Caddy, no `archiv-app` service account, no admin-credential lock-in, no healthcheck-gated startup sequence. The dev stack reflects "single developer on a laptop", not "production on a single VPS".
|
||||
|
||||
The pre-merge design (issue #497, comment #8331) sketched two ways to add a production stack:
|
||||
|
||||
1. **Overlay** — keep `docker-compose.yml` as the base, add `docker-compose.prod.yml` as a `-f` overlay (`docker compose -f docker-compose.yml -f docker-compose.prod.yml up`). Compose merges the two files at runtime.
|
||||
2. **Standalone** — make `docker-compose.prod.yml` a fully self-contained file that does not reference or merge with `docker-compose.yml` at all. Project-name namespacing (`-p archiv-production`, `-p archiv-staging`) keeps multi-environment deploys clean on a single host.
|
||||
|
||||
The earlier `docs/infrastructure/production-compose.md` notes assumed overlay because the original plan was to **remove** MinIO in production (replace with Hetzner Object Storage), so the prod file would only need to remove one service and add a few. With MinIO retained (see ADR-010), the prod stack diverges from dev in essentially every service: build vs pre-built image, target stage, port binding, env vars, healthcheck, restart policy, mem_limit, profile gating, service account, depends_on chain. Overlay would mostly be `override:` blocks that nullify the dev defaults — a fragile inversion.
|
||||
|
||||
## Decision
|
||||
|
||||
`docker-compose.prod.yml` is standalone. Production and staging both run it directly:
|
||||
|
||||
```
|
||||
production: docker compose -f docker-compose.prod.yml -p archiv-production --env-file .env.production ...
|
||||
staging: docker compose -f docker-compose.prod.yml -p archiv-staging --env-file .env.staging --profile staging ...
|
||||
```
|
||||
|
||||
Environment isolation is achieved via the Docker Compose project name (`-p`). Volumes, networks, and containers are namespaced by the project name, so production and staging cohabit cleanly on the same host without interfering.
|
||||
|
||||
The dev `docker-compose.yml` is unchanged — `docker compose up` still works for developers, and its `frontend` service now specifies `target: development` explicitly so the new multi-stage Dockerfile builds the right stage.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
| Alternative | Why rejected |
|
||||
|---|---|
|
||||
| Overlay (`-f base.yml -f prod.yml`) | With MinIO retained and most services differing across nearly every field, the overlay would consist mostly of `override:` blocks that null out dev defaults. Compose's merge semantics for nested keys (env, ports, healthcheck) are sharp — silent merges of port mappings, env-var entries, and depends_on edges cost reviewer hours. Standalone is one file the reader can hold in their head. |
|
||||
| Two fully separate files (dev + prod) sharing common service definitions via `extends:` | `extends:` works across files (YAML anchors do not), but it is a niche feature and is increasingly discouraged in Compose v2. Reviewer load is higher than reading two flat files. |
|
||||
| Generate prod compose from a template at deploy time (e.g. ytt, kustomize) | Adds a build-time step and a new tool to the operator toolchain. Justified for a fleet of 10+ environments; overkill for production + staging on one host. |
|
||||
| Single compose file with environment-specific profiles | Compose profiles select which *services* run, not which *configuration* a service runs with. Using profiles to swap "build locally" vs "pull image" would smear dev and prod across one file. |
|
||||
|
||||
## Consequences
|
||||
|
||||
- The prod file can be read top-to-bottom without cross-referencing `docker-compose.yml`. Onboarding and review cost drops.
|
||||
- Volume namespacing is automatic (`archiv-production_postgres-data`, `archiv-staging_postgres-data`) — no manual `volumes:` aliasing.
|
||||
- Dev compose churn (e.g. swapping a dev port) cannot accidentally affect production. The two files are independent.
|
||||
- The cost is duplication: identical environment variables (e.g. `POSTGRES_DB: archiv`) appear in both files. This duplication is bounded — there is no incentive to add more services that exist in both — and the alternative (overlay) carries its own duplication via `override:` boilerplate.
|
||||
- The retired `docs/infrastructure/production-compose.md` narrative is trimmed to a pointer at the live files. The cost/sizing rationale is preserved there.
|
||||
|
||||
## Future Direction
|
||||
|
||||
If the deployment fleet ever grows beyond two environments on one host (e.g. add a `demo` environment, or shard staging across two VPS for load testing), revisit the templating decision. At three+ environments the duplication starts to bite and a template engine (kustomize or ytt) becomes attractive.
|
||||
53
docs/adr/010-minio-self-hosted-not-hetzner-obs.md
Normal file
53
docs/adr/010-minio-self-hosted-not-hetzner-obs.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# ADR-010: MinIO stays self-hosted on the production VPS
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
`docs/infrastructure/production-compose.md` (pre-this-PR) sketched a production topology in which the application bucket migrates from in-cluster MinIO to Hetzner Object Storage (OBS, S3-compatible). The motivation was operational: one less service to back up, no MinIO RAM/disk pressure on the VPS, hand off durability to the hyperscaler.
|
||||
|
||||
Two facts revisited at pre-merge review (issue #497, comment #8331) changed the answer:
|
||||
|
||||
1. **Current data size is small.** The archive is ~13 GB of file uploads (Kurrent letters, scanned ODS files, attachment PDFs). Hetzner OBS billing on this size is dominated by the per-month base fee (~5 EUR/mo for the smallest unit), not capacity or egress. The break-even point against the VPS's existing disk is far above the current footprint.
|
||||
2. **MinIO is already production-grade.** The dev stack uses MinIO; the backend already drives it via the AWS SDK v2 with a generic `S3_ENDPOINT`. Switching providers is a runtime env-var change (`S3_ENDPOINT`, `S3_ACCESS_KEY`, `S3_SECRET_KEY`) plus an `mc mirror` to copy objects. There is no application-level rewrite cost waiting.
|
||||
|
||||
If Hetzner OBS were a one-way door (provider-specific SDK, complex IAM integration, multi-month migration), the decision would deserve a serious weighing. Because the migration is this cheaply reversible, deferring it costs nothing.
|
||||
|
||||
## Decision
|
||||
|
||||
MinIO stays on the production VPS for the first launch. The application bucket is created and managed inside the docker-compose stack (`infra/minio/bootstrap.sh`). The backend uses a least-privilege service account (`archiv-app`) with a bucket-scoped IAM policy, not the MinIO root credentials.
|
||||
|
||||
Hetzner Object Storage is **explicitly deferred**, not rejected. The migration path is documented as a runbook in `docs/DEPLOYMENT.md` (when the trigger fires): provision an OBS bucket, run `mc mirror local-minio/familienarchiv obs/familienarchiv`, rotate the three env vars, restart the backend, decommission the MinIO service from `docker-compose.prod.yml`.
|
||||
|
||||
## Triggers to re-evaluate
|
||||
|
||||
Revisit the decision when **any** of the following holds:
|
||||
|
||||
- The `minio-data` volume exceeds 50 GB and is growing > 5 GB/month.
|
||||
- MinIO healthcheck latency exceeds 200 ms p95 (signal of disk pressure on the host).
|
||||
- The VPS upgrade required to keep MinIO healthy costs more per month than the equivalent OBS bucket + traffic.
|
||||
- Backup of the MinIO volume to `heim-nas` over Tailscale (deferred follow-up) is implemented and consistently runs > 30 min nightly. At that point durability-as-a-service starts paying for itself.
|
||||
|
||||
The migration runbook in `docs/DEPLOYMENT.md` is the script for executing the swap when one of the triggers fires.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
| Alternative | Why rejected (for now) |
|
||||
|---|---|
|
||||
| Migrate to Hetzner Object Storage in this PR | Premature. Adds an external dependency, locks the operator into the Hetzner ecosystem before the data has demonstrated it needs hyperscaler durability, blocks the PR on a migration that buys ~5 GB of headroom. |
|
||||
| Migrate to S3 (AWS) for HA across regions | Way over-spec for a family archive. Egress cost would dwarf any benefit; durability concerns at this size are addressed by nightly off-site backup, not by multi-region replication. |
|
||||
| Drop S3 abstraction entirely; store files directly on the VPS disk | Possible, but loses the bucket-policy IAM surface (least-privilege service account), loses presigned-URL flow (OCR service downloads files via short-lived URLs, not via shared filesystem), loses the migration path to OBS. The S3 indirection is cheap insurance. |
|
||||
| Self-hosted on-VPS plus periodic `mc mirror` to Hetzner OBS for off-site backup | This is the **target** for the backup pipeline follow-up. Treated as backup, not primary — primary stays MinIO. |
|
||||
|
||||
## Consequences
|
||||
|
||||
- The production VPS sizing (Hetzner CX42, 16 GB RAM, 80 GB disk) must accommodate MinIO's working set. Current footprint leaves ample headroom.
|
||||
- Backup of MinIO data is the operator's responsibility until the off-site `mc mirror` pipeline is implemented (deferred follow-up). The DEPLOYMENT.md rollback procedure explicitly flags this — manual backup is the only recovery option until the pipeline ships.
|
||||
- The backend never sees the MinIO root password; it uses the `archiv-app` service account with a bucket-scoped IAM policy (see `infra/minio/bootstrap.sh`). A backend RCE/SSRF cannot escalate beyond the `familienarchiv` bucket.
|
||||
- The migration to Hetzner OBS remains a small, well-understood runbook step rather than a major refactor. No application code, no SDK swap.
|
||||
|
||||
## Future Direction
|
||||
|
||||
When one of the triggers above fires, the migration is: provision OBS bucket → `mc mirror` → rotate three env vars → restart backend → remove MinIO service from compose. The bucket-scoped policy translates 1:1 to an OBS user policy (S3-compatible).
|
||||
58
docs/adr/011-single-tenant-gitea-runner.md
Normal file
58
docs/adr/011-single-tenant-gitea-runner.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# ADR-011: Single-tenant Gitea runner with secrets-on-disk env-files
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
The deploy workflows (`.gitea/workflows/nightly.yml`, `release.yml`) execute on a self-hosted Gitea Actions runner. The runner has Docker-out-of-Docker access (the host's Docker socket is mounted into the runner), so `docker compose build` produces images on the host daemon and `docker compose up` consumes them directly — no registry hop.
|
||||
|
||||
Two workflow steps shape the security model:
|
||||
|
||||
1. **"Write env file"** — the workflow writes every required secret to `.env.staging` or `.env.production` on the runner's filesystem so that `docker compose --env-file` can consume them. The file lives on disk for the duration of the workflow.
|
||||
2. **"Cleanup env file"** — the matching `if: always()` step deletes the env file after the workflow ends, regardless of success.
|
||||
|
||||
This shape only works under one operational assumption: **the runner is single-tenant**. The runner is owned by the same operator who owns the secrets, no other repositories run jobs on the same runner, and no untrusted code is executed (no public fork PRs trigger workflows). If any of those assumptions ceased to hold, the env-file-on-disk approach would become a credential exposure path — a sibling job could read `.env.production`, or a malicious PR could exfiltrate the secrets via a step.
|
||||
|
||||
The alternative — `docker compose --env-file <(printf "..." )` (bash process substitution) — is technically supported and would keep secrets out of the on-disk filesystem. It is more secure under a multi-tenant runner but requires bash (process substitution is not POSIX sh) and is brittle inside YAML (the `printf` step would need to escape every secret value containing newlines, equals signs, or quotes).
|
||||
|
||||
## Decision
|
||||
|
||||
The runner is treated as single-tenant for the lifetime of the v1 deployment. The workflows write env-files to disk under that assumption and rely on the `if: always()` cleanup step to remove them. The operational assumption is documented in-comment at the top of both workflow files (`nightly.yml`, `release.yml`) so the next operator who considers adding a second repo or accepting public PRs has the trigger surfaced in front of them.
|
||||
|
||||
Concretely:
|
||||
|
||||
- The Gitea runner only runs jobs for `marcel/familienarchiv`.
|
||||
- No public fork PRs trigger the workflows (Gitea defaults to requiring an explicit approval on first-time contributor PRs for the actions to run).
|
||||
- Secrets are stored in Gitea repository secrets and injected via `${{ secrets.* }}`. They land in the env-file at workflow start and are removed at workflow end.
|
||||
|
||||
## Migration trigger
|
||||
|
||||
Switch to the multi-tenant-safe pattern when **any** of the following becomes true:
|
||||
|
||||
- A second repository starts using the same runner.
|
||||
- A workflow accepts contributions that can run untrusted code (public PRs without manual approval).
|
||||
- The runner is moved off the operator's controlled host onto shared infrastructure.
|
||||
|
||||
The migration path is one-step per workflow: replace the "Write env file" step with `--env-file <(printf '%s' "${{ secrets.STAGING_ENV_BLOB }}")` and store the full env-file as a single Gitea secret. The cleanup step is then unnecessary because the env-file never touches disk.
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
| Alternative | Why rejected (for now) |
|
||||
|---|---|
|
||||
| `--env-file <(printf "...")` via bash process substitution | More secure under multi-tenant. Brittle for multi-line / quoted secret values; harder to debug ("env file not found" with no diff to inspect). Justified once the trigger above fires. |
|
||||
| Docker secrets (`docker secret create` + `compose secrets:`) | Designed for Swarm; outside of Swarm, compose secrets read from files anyway, so the on-disk surface is the same. Adds complexity without changing the threat model. |
|
||||
| External secret manager (Vault, AWS Secrets Manager) | Adds a third-party dependency to the deploy path. For a family-archive deployment with one operator and one VPS, the cost outweighs the benefit at this scale. |
|
||||
| GitHub-hosted ephemeral runners | Would require uploading the prod-deploy artifacts to a registry first, then a deploy step on the VPS connecting back. Inverts the current Docker-out-of-Docker simplicity for marginal security gain. The single-tenant self-hosted runner *is* ephemeral in practice — the secrets are written to a directory the runner controls, then deleted. |
|
||||
|
||||
## Consequences
|
||||
|
||||
- The runner host's filesystem is in the secret-trust boundary. The host is hardened per `docs/DEPLOYMENT.md` (ufw, fail2ban, Tailscale-only SSH).
|
||||
- An operator who later adds a second repo to the runner without revisiting the workflows would silently break the trust assumption. The in-file comments at the top of `nightly.yml` and `release.yml` are the breadcrumb that surfaces the assumption at change time.
|
||||
- The `if: always()` cleanup step is load-bearing: removing it (e.g. during a future workflow refactor) leaves credentials on disk between runs. Treat it as a permanent invariant.
|
||||
- Workflow debuggability stays high: an operator who needs to know what env-file the deploy ran with can SSH onto the host while a workflow is in flight and `cat .env.staging` — useful for first-deploy diagnostics.
|
||||
|
||||
## Future Direction
|
||||
|
||||
When the trigger fires, migrate both workflows in a single PR: replace the "Write env file" step with a single `--env-file <(printf '%s' …)` invocation, drop the cleanup step, and consolidate the per-secret Gitea entries into a single multi-line `STAGING_ENV_BLOB` / `PROD_ENV_BLOB` secret. Single commit, both workflows, no application change.
|
||||
@@ -1,5 +1,9 @@
|
||||
# Familienarchiv — C4 Architecture Diagrams
|
||||
|
||||
> For domain terminology used in these diagrams, see [docs/GLOSSARY.md](../GLOSSARY.md).
|
||||
>
|
||||
> **Cross-diagram stubs:** Components placed outside a `System_Boundary` block with a "See diagram X" annotation are reference stubs — they represent a component fully defined in another sub-diagram and appear here only to show the cross-domain dependency without duplicating the full definition.
|
||||
|
||||
## Level 1 — System Context
|
||||
|
||||
Who uses the system and what external systems does it interact with.
|
||||
@@ -8,13 +12,15 @@ Who uses the system and what external systems does it interact with.
|
||||
C4Context
|
||||
title System Context: Familienarchiv
|
||||
|
||||
Person(admin, "Administrator", "Manages users, triggers bulk imports, reviews documents")
|
||||
Person(member, "Family Member", "Searches, browses, and reads archived documents")
|
||||
Person(admin, "Administrator", "Manages users, triggers bulk imports, reviews and transcribes documents")
|
||||
Person(member, "Family Member", "Access by administrator invite. Searches, browses, reads, and transcribes archived documents.")
|
||||
|
||||
System(familienarchiv, "Familienarchiv", "Web application for digitising, organising, and searching family documents")
|
||||
System_Ext(mail, "Email Service", "SMTP server. Delivers notification emails (mentions, replies) and password-reset links.")
|
||||
|
||||
Rel(admin, familienarchiv, "Manages via browser", "HTTPS")
|
||||
Rel(member, familienarchiv, "Searches and views via browser", "HTTPS")
|
||||
Rel(member, familienarchiv, "Searches, reads, and transcribes via browser", "HTTPS")
|
||||
Rel(familienarchiv, mail, "Sends notification and password-reset emails (optional)", "SMTP")
|
||||
```
|
||||
|
||||
---
|
||||
@@ -28,13 +34,16 @@ C4Container
|
||||
title Container Diagram: Familienarchiv
|
||||
|
||||
Person(user, "User", "Admin or family member")
|
||||
System_Ext(mail, "Email Service", "SMTP server. Delivers notification and password-reset emails.")
|
||||
|
||||
System_Boundary(archiv, "Familienarchiv (Docker Compose)") {
|
||||
Container(frontend, "Web Frontend", "SvelteKit / Node.js", "Server-side rendered UI. Handles session cookies, search UI, document viewer, and admin panel.")
|
||||
Container(frontend, "Web Frontend", "SvelteKit / Node.js", "Server-side rendered UI. Handles auth session cookies, document search and viewer, transcription editor, annotation layer, family tree (Stammbaum), stories (Geschichten), activity feed (Chronik), enrichment workflow, and admin panel.")
|
||||
|
||||
Container(backend, "API Backend", "Spring Boot 4 / Java 21 / Jetty", "REST API. Implements document management, search, user auth, file upload/download, and Excel import.")
|
||||
Container(backend, "API Backend", "Spring Boot 4 / Java 21 / Jetty", "REST API. Implements document management, search, user auth, file upload/download, transcription, OCR orchestration, and SSE notifications.")
|
||||
|
||||
ContainerDb(db, "Relational Database", "PostgreSQL 16", "Stores document metadata, persons, users, permission groups, tags, and Spring Session data.")
|
||||
Container(ocr, "OCR Service", "Python FastAPI / port 8000", "Handwritten text recognition (HTR) and OCR microservice. Single-node by design — see ADR-001. Reachable only on the internal Docker network; no external port exposed.")
|
||||
|
||||
ContainerDb(db, "Relational Database", "PostgreSQL 16", "Stores document metadata, persons, users, permission groups, tags, transcription blocks, audit log, and Spring Session data.")
|
||||
|
||||
    ContainerDb(storage, "Object Storage", "MinIO (S3-compatible)", "Stores the actual document files (PDFs, scans). Objects keyed as documents/{UUID}_{filename}.")
|
||||
|
||||
@@ -43,8 +52,12 @@ C4Container
|
||||
|
||||
Rel(user, frontend, "Uses", "HTTPS / Browser")
|
||||
Rel(frontend, backend, "API requests with Basic Auth token", "HTTP / REST / JSON")
|
||||
Rel(backend, user, "SSE notifications (server-sent events)", "HTTP / SSE — direct backend-to-browser")
|
||||
Rel(backend, db, "Reads and writes metadata and sessions", "JDBC / SQL")
|
||||
Rel(backend, storage, "Uploads and streams document files", "HTTP / S3 API (AWS SDK v2)")
|
||||
Rel(backend, ocr, "OCR job requests with presigned MinIO URL", "HTTP / REST / JSON")
|
||||
Rel(backend, mail, "Sends notification and password-reset emails (optional)", "SMTP")
|
||||
Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
|
||||
Rel(mc, storage, "Creates bucket on startup", "MinIO Client CLI")
|
||||
```
|
||||
|
||||
@@ -52,149 +65,444 @@ C4Container
|
||||
|
||||
## Level 3 — Components: API Backend
|
||||
|
||||
The internal structure of the Spring Boot backend.
|
||||
The internal structure of the Spring Boot backend, split into seven focused sub-diagrams.
|
||||
|
||||
### 3a — Security & Authentication
|
||||
|
||||
How requests are authenticated and write operations are authorised.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend
|
||||
title Component Diagram: API Backend — Security & Authentication
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(secFilter, "Security Filter Chain", "Spring Security", "Enforces authentication on all requests. Parses Basic Auth header and constructs an Authentication token; delegates credential validation to DaoAuthenticationProvider via BCrypt. Permits password-reset, invite, and register endpoints without authentication.")
|
||||
Component(permAspect, "PermissionAspect", "Spring AOP", "Intercepts methods annotated with @RequirePermission. Checks user's granted authorities against the required permission. Throws 401/403 if denied.")
|
||||
Component(secConf, "SecurityConfig", "Spring @Configuration", "Configures filter chain: all routes require authentication, CSRF disabled, BCrypt password encoder, DaoAuthenticationProvider with CustomUserDetailsService.")
|
||||
Component(userDetails, "CustomUserDetailsService", "Spring Security UserDetailsService", "Loads AppUser by email from DB. Converts group permissions to Spring GrantedAuthority objects. Logs unknown permissions.")
|
||||
}
|
||||
|
||||
Rel(frontend, secFilter, "All requests", "HTTP / Basic Auth header")
|
||||
Rel(secFilter, permAspect, "Authenticated requests reach guarded service methods", "")
|
||||
Rel(secConf, userDetails, "Wires as UserDetailsService", "")
|
||||
Rel(userDetails, db, "Loads user by email", "JDBC")
|
||||
```
|
||||
|
||||
### 3b — Document Management & Import
|
||||
|
||||
Document management, file storage, and bulk Excel/ODS import.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend — Document Management & Import
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
ContainerDb(minio, "MinIO")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(docCtrl, "DocumentController", "Spring MVC — /api/documents", "CRUD for documents: search, get by ID, update metadata, upload/download file, conversation thread, and batch metadata updates.")
|
||||
Component(adminCtrl, "AdminController", "Spring MVC — /api/admin", "Triggers asynchronous Excel/ODS mass import (requires ADMIN permission). Reports import state (IDLE/RUNNING/DONE/FAILED).")
|
||||
|
||||
Component(secFilter, "Security Filter Chain", "Spring Security", "Enforces authentication on all requests. Parses Basic Auth header and validates credentials via BCrypt.")
|
||||
Component(permAspect, "PermissionAspect", "Spring AOP", "Intercepts methods annotated with @RequirePermission. Checks user's granted authorities against the required permission. Throws 401/403 if denied.")
|
||||
Component(docSvc, "DocumentService", "Spring Service", "Core document business logic: store, update, search. Resolves persons and tags, delegates file I/O to FileService, builds dynamic JPA Specifications, and integrates with audit logging.")
|
||||
Component(fileSvc, "FileService", "Spring Service", "Wraps AWS SDK v2 S3Client. Uploads files with UUID-keyed paths, computes SHA-256 hash, downloads with content-type detection, and generates presigned URLs for OCR access.")
|
||||
Component(massImport, "MassImportService", "Spring Service — @Async", "Reads Excel/ODS files from /import mount. Tracks import state (IDLE/RUNNING/DONE/FAILED) and delegates to ExcelService. Returns immediately; processing runs asynchronously.")
|
||||
Component(excelSvc, "ExcelService", "Spring Service", "Parses Excel/ODS workbooks (Apache POI). Column indices configurable via application.properties. Creates/updates document records per row.")
|
||||
Component(minioConf, "MinioConfig", "Spring @Configuration", "Creates the S3Client and S3Presigner beans with path-style access for MinIO. Validates MinIO connectivity on startup.")
|
||||
|
||||
Component(docCtrl, "DocumentController", "Spring MVC — /api/documents", "CRUD for documents. Endpoints: search, get by ID, update metadata, upload file, download file, get conversation thread.")
|
||||
Component(personCtrl, "PersonController", "Spring MVC — /api/persons", "Lists and searches family members. Also returns all documents sent by a person.")
|
||||
Component(userCtrl, "UserController", "Spring MVC — /api/users", "Returns current user (/me). Creates and deletes users (requires ADMIN_USER permission).")
|
||||
Component(adminCtrl, "AdminController", "Spring MVC — /api/admin", "Triggers asynchronous Excel mass import (requires ADMIN permission).")
|
||||
Component(groupCtrl, "GroupController", "Spring MVC — /api/groups", "Lists and manages permission groups.")
|
||||
Component(tagCtrl, "TagController", "Spring MVC — /api/tags", "Lists tags for typeahead.")
|
||||
|
||||
Component(docSvc, "DocumentService", "Spring Service", "Core business logic: store, update, search documents. Resolves persons and tags. Delegates file I/O to FileService. Builds JPA Specifications for dynamic search queries.")
|
||||
Component(fileSvc, "FileService", "Spring Service", "Wraps AWS SDK v2 S3Client. Uploads files with UUID-keyed paths. Downloads with content-type detection (PDF, JPEG, PNG, octet-stream).")
|
||||
Component(excelSvc, "ExcelService", "Spring Service", "Parses Excel workbooks (Apache POI). Column indices are configurable via application.properties. Creates/updates document records per row.")
|
||||
Component(massImport, "MassImportService", "Spring Service — @Async", "Reads Excel files from /import mount. Delegates to ExcelService. Runs asynchronously so the HTTP response returns immediately.")
|
||||
Component(userSvc, "UserService", "Spring Service", "User CRUD. Encodes passwords with BCrypt. Assigns users to permission groups.")
|
||||
Component(dataInit, "DataInitializer", "CommandLineRunner", "On startup: creates default admin user and groups if none exist. Seeds test data (persons, documents) if DB is empty.")
|
||||
|
||||
Component(docRepo, "DocumentRepository", "Spring Data JPA", "Queries documents. Supports Specification-based dynamic search, conversation thread queries (bidirectional sender/receiver), and filename lookups.")
|
||||
Component(docSpec, "DocumentSpecifications", "JPA Criteria API", "Factory for composable query predicates: hasText (full-text across title/filename/transcription/location), hasSender, hasReceiver (join), isBetween (date range), hasTags (subquery AND logic).")
|
||||
Component(personRepo, "PersonRepository", "Spring Data JPA", "Lists all persons sorted by last name. Supports name search for typeahead.")
|
||||
Component(userRepo, "AppUserRepository", "Spring Data JPA", "Finds users by username. Used by Spring Security and UserService.")
|
||||
Component(tagRepo, "TagRepository", "Spring Data JPA", "Finds or creates tags by name (case-insensitive).")
|
||||
Component(groupRepo, "UserGroupRepository", "Spring Data JPA", "Manages permission groups.")
|
||||
|
||||
Component(minioConf, "MinioConfig", "Spring @Configuration", "Creates the S3Client bean with path-style access for MinIO. Validates MinIO connectivity on startup.")
|
||||
Component(secConf, "SecurityConfig", "Spring @Configuration", "Configures filter chain: all routes require authentication, CSRF disabled, BCrypt password encoder, DaoAuthenticationProvider with CustomUserDetailsService.")
|
||||
Component(userDetails, "CustomUserDetailsService", "Spring Security UserDetailsService", "Loads AppUser by username from DB. Converts group permissions to Spring GrantedAuthority objects.")
|
||||
Component(docRepo, "DocumentRepository", "Spring Data JPA", "Queries documents with Specification-based dynamic search, bidirectional conversation thread queries, full-text search with ranking and match highlighting, and transcription pipeline queue projections.")
|
||||
Component(docSpec, "DocumentSpecifications", "JPA Criteria API", "Factory for composable predicates: hasText (full-text), hasSender, hasReceiver, isBetween (date range), hasTags (subquery AND/OR logic).")
|
||||
}
|
||||
|
||||
Rel(frontend, secFilter, "All requests", "HTTP / Basic Auth header")
|
||||
Rel(secFilter, permAspect, "Authenticated requests proceed", "")
|
||||
|
||||
Rel(secFilter, docCtrl, "Routes to", "")
|
||||
Rel(secFilter, personCtrl, "Routes to", "")
|
||||
Rel(secFilter, userCtrl, "Routes to", "")
|
||||
Rel(secFilter, adminCtrl, "Routes to", "")
|
||||
|
||||
Rel(permAspect, docCtrl, "Guards", "AOP @Around")
|
||||
Rel(permAspect, userCtrl, "Guards", "AOP @Around")
|
||||
Rel(permAspect, adminCtrl, "Guards", "AOP @Around")
|
||||
Component(personSvc, "PersonService", "Spring Service", "See diagram 3e. Called by DocumentService to resolve sender / receiver persons by ID.")
|
||||
Component(tagSvc, "TagService", "Spring Service", "See diagram 3d. Called by DocumentService to find or create tags by name.")
|
||||
|
||||
Rel(frontend, docCtrl, "Document requests", "HTTP / JSON")
|
||||
Rel(frontend, adminCtrl, "Trigger import", "HTTP / JSON")
|
||||
Rel(docCtrl, docSvc, "Delegates to", "")
|
||||
Rel(adminCtrl, massImport, "Triggers", "")
|
||||
Rel(userCtrl, userSvc, "Delegates to", "")
|
||||
|
||||
Rel(docSvc, fileSvc, "Upload / download files", "")
|
||||
Rel(docSvc, docRepo, "Reads / writes documents", "")
|
||||
Rel(docSvc, docSpec, "Builds search predicates", "")
|
||||
Rel(docSvc, personRepo, "Resolves sender / receivers", "")
|
||||
Rel(docSvc, tagRepo, "Finds or creates tags", "")
|
||||
|
||||
Rel(massImport, excelSvc, "Parses Excel file", "")
|
||||
Rel(docSvc, personSvc, "Resolves sender / receivers", "")
|
||||
Rel(docSvc, tagSvc, "Finds or creates tags", "")
|
||||
Rel(massImport, excelSvc, "Parses Excel/ODS file", "")
|
||||
Rel(excelSvc, docSvc, "Creates / updates documents", "")
|
||||
Rel(minioConf, fileSvc, "Provides S3Client and S3Presigner beans", "")
|
||||
Rel(fileSvc, minio, "PUT / GET / presigned URL objects", "S3 API / HTTP")
|
||||
Rel(docRepo, db, "SQL queries", "JDBC")
|
||||
```
|
||||
|
||||
### 3c — Document Transcription Pipeline
|
||||
|
||||
Annotation-driven transcription: page markup, text blocks, versioning, and comment threads.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend — Document Transcription Pipeline
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(transcriptionCtrl, "TranscriptionBlockController", "Spring MVC — /api/transcription", "CRUD for transcription text blocks per document page. Manages sort order, review status, and block version history.")
|
||||
Component(annotationCtrl, "AnnotationController", "Spring MVC — /api/documents/{id}/annotations", "CRUD for free-form page annotations with polygon coordinates, colour coding, and file-hash tracking.")
|
||||
Component(commentCtrl, "CommentController", "Spring MVC — /api/documents/{id}/comments", "Threaded comment CRUD on transcription blocks with @mention support and notification triggers.")
|
||||
|
||||
Component(transcriptionSvc, "TranscriptionService", "Spring Service", "Creates and updates transcription blocks from annotation regions. Tracks block versions, sanitizes text with an HTML allow-list, and triggers mentions.")
|
||||
Component(transcriptionQueueSvc, "TranscriptionQueueService", "Spring Service", "Assembles segmentation, transcription, and review queue projections by delegating to DocumentService and AuditLogQueryService.")
|
||||
Component(annotationSvc, "AnnotationService", "Spring Service", "Manages document page annotations with polygon coordinates. Called by OcrAsyncRunner to persist OCR-generated block boundaries.")
|
||||
Component(commentSvc, "CommentService", "Spring Service", "Creates and manages threaded comments with @mention parsing. Triggers NotificationService for REPLY and MENTION events.")
|
||||
|
||||
Component(blockRepo, "TranscriptionBlockRepository", "Spring Data JPA", "Reads and writes TranscriptionBlock and TranscriptionBlockVersion records.")
|
||||
Component(annotationRepo, "AnnotationRepository", "Spring Data JPA", "Reads and writes DocumentAnnotation records.")
|
||||
Component(commentRepo, "CommentRepository", "Spring Data JPA", "Reads and writes DocumentComment records.")
|
||||
}
|
||||
|
||||
Component(documentSvc, "DocumentService", "Spring Service", "See diagram 3b. Called by TranscriptionQueueService to assemble pipeline queue projections.")
|
||||
Component(auditQuerySvc, "AuditLogQueryService", "Spring Service", "See diagram 3g. Called by TranscriptionQueueService for pipeline activity data.")
|
||||
|
||||
Rel(frontend, transcriptionCtrl, "Transcription block requests", "HTTP / JSON")
|
||||
Rel(frontend, annotationCtrl, "Annotation requests", "HTTP / JSON")
|
||||
Rel(frontend, commentCtrl, "Comment requests", "HTTP / JSON")
|
||||
Rel(transcriptionCtrl, transcriptionSvc, "Delegates to", "")
|
||||
Rel(transcriptionCtrl, transcriptionQueueSvc, "Queries pipeline queues", "")
|
||||
Rel(annotationCtrl, annotationSvc, "Delegates to", "")
|
||||
Rel(commentCtrl, commentSvc, "Delegates to", "")
|
||||
Rel(transcriptionSvc, blockRepo, "Reads / writes blocks and versions", "")
|
||||
Rel(annotationSvc, annotationRepo, "Reads / writes annotations", "")
|
||||
Rel(commentSvc, commentRepo, "Reads / writes comments", "")
|
||||
Rel(transcriptionQueueSvc, documentSvc, "Queries pipeline document state", "")
|
||||
Rel(transcriptionQueueSvc, auditQuerySvc, "Queries pipeline activity data", "")
|
||||
Rel(blockRepo, db, "SQL queries", "JDBC")
|
||||
Rel(annotationRepo, db, "SQL queries", "JDBC")
|
||||
Rel(commentRepo, db, "SQL queries", "JDBC")
|
||||
```
|
||||
|
||||
### 3d — Users, Groups & Administration
|
||||
|
||||
User lifecycle, permission groups, tag management, and authentication endpoints.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend — Users, Groups & Administration
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(userCtrl, "UserController", "Spring MVC — /api/users", "Returns current user (/me), creates and deletes users (requires ADMIN_USER), supports user search and profile updates.")
|
||||
Component(groupCtrl, "GroupController", "Spring MVC — /api/groups", "Lists and manages permission groups.")
|
||||
Component(tagCtrl, "TagController", "Spring MVC — /api/tags", "Lists tags for typeahead, supports tag merge, tree structure, and subtree deletion.")
|
||||
Component(inviteCtrl, "InviteController", "Spring MVC — /api/auth/invite", "Creates invite codes and validates them at registration time. Rate-limited via WebConfig interceptor.")
|
||||
Component(authCtrl, "AuthController", "Spring MVC — /api/auth", "Handles user registration (POST /register) and password reset token endpoints (/forgot-password, /reset-password).")
|
||||
|
||||
Component(userSvc, "UserService", "Spring Service", "User CRUD with BCrypt password encoding, group assignment, and audit logging. Orchestrates invite-based registration and password reset tokens.")
|
||||
Component(tagSvc, "TagService", "Spring Service", "Tag CRUD with name search, hierarchical tree structure, merge/reparent operations, and recursive subtree deletion.")
|
||||
Component(dataInit, "DataInitializer", "CommandLineRunner", "On startup: creates default admin user and groups if none exist. Seeds test data if DB is empty.")
|
||||
|
||||
Component(userRepo, "AppUserRepository", "Spring Data JPA", "Finds users by email. Supports search by email or display name.")
|
||||
Component(groupRepo, "UserGroupRepository", "Spring Data JPA", "Manages permission groups.")
|
||||
Component(tagRepo, "TagRepository", "Spring Data JPA", "Finds or creates tags by name (case-insensitive). Supports recursive ancestor/descendant CTE queries and merge/reparent helpers.")
|
||||
}
|
||||
|
||||
Rel(frontend, userCtrl, "User requests", "HTTP / JSON")
|
||||
Rel(frontend, groupCtrl, "Group requests", "HTTP / JSON")
|
||||
Rel(frontend, tagCtrl, "Tag requests", "HTTP / JSON")
|
||||
Rel(frontend, inviteCtrl, "Invite validation", "HTTP / JSON")
|
||||
Rel(frontend, authCtrl, "Registration and password reset", "HTTP / JSON")
|
||||
Rel(userCtrl, userSvc, "Delegates to", "")
|
||||
Rel(groupCtrl, userSvc, "Delegates to", "")
|
||||
Rel(tagCtrl, tagSvc, "Delegates to", "")
|
||||
Rel(tagSvc, tagRepo, "Reads / writes tags", "")
|
||||
Rel(inviteCtrl, userSvc, "Creates and validates invites", "")
|
||||
Rel(authCtrl, userSvc, "Registers users, resets passwords", "")
|
||||
Rel(userSvc, userRepo, "Reads / writes users", "")
|
||||
Rel(userSvc, groupRepo, "Assigns groups", "")
|
||||
Rel(userDetails, userRepo, "Loads user by username", "")
|
||||
|
||||
Rel(fileSvc, minio, "PUT / GET objects", "S3 API / HTTP")
|
||||
Rel(docRepo, db, "SQL queries", "JDBC")
|
||||
Rel(personRepo, db, "SQL queries", "JDBC")
|
||||
Rel(userRepo, db, "SQL queries", "JDBC")
|
||||
Rel(tagRepo, db, "SQL queries", "JDBC")
|
||||
Rel(groupRepo, db, "SQL queries", "JDBC")
|
||||
Rel(dataInit, db, "Seeds initial data", "JDBC")
|
||||
Rel(secConf, userDetails, "Wires", "")
|
||||
Rel(minioConf, fileSvc, "Provides S3Client bean", "")
|
||||
Rel(userRepo, db, "SQL queries", "JDBC")
|
||||
Rel(groupRepo, db, "SQL queries", "JDBC")
|
||||
Rel(tagRepo, db, "SQL queries", "JDBC")
|
||||
```
|
||||
|
||||
### 3e — Persons & Family Graph
|
||||
|
||||
Person management including family relationship modelling and transitive inference.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend — Persons & Family Graph
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(personCtrl, "PersonController", "Spring MVC — /api/persons", "Lists and searches family members. Returns documents sent by or received by a person, correspondent suggestions, and person summary with document counts.")
|
||||
Component(relCtrl, "RelationshipController", "Spring MVC — /api/network, /api/persons/{id}/relationships", "CRUD for explicit person relationships and the full family network graph (nodes + edges) used by the Stammbaum view.")
|
||||
|
||||
Component(personSvc, "PersonService", "Spring Service", "Person CRUD, alias management, and merge operations (reassigns all document sender/receiver references before deleting duplicate persons).")
|
||||
Component(relSvc, "RelationshipService", "Spring Service", "Manages explicit directional family relationships (PARENT_OF, SPOUSE_OF, SIBLING_OF, etc.) with optional date ranges and notes.")
|
||||
Component(relInference, "RelationshipInferenceService", "Spring Service", "Computes transitive family relationships from explicit edges to infer grandparent/grandchild, aunt/uncle, and other extended-family links for the network graph.")
|
||||
|
||||
Component(personRepo, "PersonRepository", "Spring Data JPA", "Queries persons with name search (including aliases), correspondent discovery, person summaries with document counts, and merge/reassignment helpers.")
|
||||
Component(relRepo, "PersonRelationshipRepository", "Spring Data JPA", "Reads and writes PersonRelationship records. Supports lookup by person ID, by relation type, and existence checks for deduplication.")
|
||||
}
|
||||
|
||||
Rel(frontend, personCtrl, "Person requests", "HTTP / JSON")
|
||||
Rel(frontend, relCtrl, "Relationship and graph requests", "HTTP / JSON")
|
||||
Rel(personCtrl, personSvc, "Delegates to", "")
|
||||
Rel(relCtrl, relSvc, "Delegates to", "")
|
||||
Rel(relCtrl, relInference, "Queries inferred graph", "")
|
||||
Rel(personSvc, personRepo, "Reads / writes persons", "")
|
||||
Rel(relSvc, relRepo, "Reads / writes relationships", "")
|
||||
Rel(relInference, relRepo, "Reads relationships for inference", "")
|
||||
Rel(personRepo, db, "SQL queries", "JDBC")
|
||||
Rel(relRepo, db, "SQL queries", "JDBC")
|
||||
```
|
||||
|
||||
### 3f — OCR Orchestration
|
||||
|
||||
How the Spring Boot backend manages OCR jobs, streams results, and trains recognition models.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend — OCR Orchestration
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
ContainerDb(minio, "MinIO")
|
||||
Container(ocrPy, "OCR Service", "Python FastAPI")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(ocrCtrl, "OcrController", "Spring MVC — /api/ocr", "REST entry point: trigger single or batch OCR jobs, stream progress via SSE, query job status, and manage training runs and per-sender models.")
|
||||
Component(ocrSvc, "OcrService", "Spring Service", "Creates OcrJob and OcrJobDocument records, checks Python service health, and delegates async execution to OcrAsyncRunner.")
|
||||
Component(ocrBatch, "OcrBatchService", "Spring Service", "Orchestrates multi-document OCR jobs, iterating documents and delegating each to OcrAsyncRunner.")
|
||||
Component(ocrAsync, "OcrAsyncRunner", "Spring Component — @Async", "Async worker that streams OCR results from Python page by page, persists transcription blocks and annotations via domain services, and emits progress via SSE.")
|
||||
Component(ocrClient, "RestClientOcrClient", "Spring Component", "HTTP client wrapping the Python service: POST /ocr/stream (NDJSON), /train, /segtrain, and /train-sender. Falls back from streaming to batch on 404.")
|
||||
Component(ocrTraining, "OcrTrainingService", "Spring Service", "Orchestrates model training: exports training data as ZIP, calls Python /train or /segtrain, persists training metrics in OcrTrainingRunRepository.")
|
||||
Component(ocrJobRepo, "OcrJobRepository, OcrJobDocumentRepository", "Spring Data JPA", "Reads and writes OcrJob and OcrJobDocument records. Tracks job status (RUNNING/DONE/FAILED), per-document progress, page counts, and error messages.")
|
||||
}
|
||||
|
||||
Component(transcriptionSvc, "TranscriptionService", "Spring Service", "See diagram 3c. Called by OcrAsyncRunner to persist transcription blocks per page.")
|
||||
Component(annotationSvc, "AnnotationService", "Spring Service", "See diagram 3c. Called by OcrAsyncRunner to persist OCR-generated annotation regions per page.")
|
||||
|
||||
Rel(frontend, ocrCtrl, "OCR trigger, status, and progress requests", "HTTP / JSON / SSE")
|
||||
Rel(ocrCtrl, ocrSvc, "Single-document jobs", "")
|
||||
Rel(ocrCtrl, ocrBatch, "Batch jobs", "")
|
||||
Rel(ocrCtrl, ocrTraining, "Training runs", "")
|
||||
Rel(ocrSvc, ocrAsync, "Delegates async execution", "")
|
||||
Rel(ocrBatch, ocrAsync, "Delegates async execution", "")
|
||||
Rel(ocrAsync, ocrClient, "Streams OCR results page by page", "HTTP / NDJSON")
|
||||
Rel(ocrTraining, ocrClient, "Sends training data ZIP", "HTTP / multipart")
|
||||
Rel(ocrClient, ocrPy, "POST /ocr/stream, /train, /segtrain, /train-sender", "HTTP / REST")
|
||||
Rel(ocrAsync, transcriptionSvc, "Saves transcription blocks per page", "")
|
||||
Rel(ocrAsync, annotationSvc, "Saves annotation regions per page", "")
|
||||
Rel(ocrAsync, ocrJobRepo, "Reads / writes OCR job state", "")
|
||||
Rel(ocrJobRepo, db, "SQL queries", "JDBC")
|
||||
Rel(ocrAsync, minio, "Generates presigned URLs for PDF fetch", "S3 API")
|
||||
Rel(ocrPy, minio, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
|
||||
Rel(ocrTraining, db, "Persists training run metrics", "JDBC")
|
||||
```
|
||||
|
||||
### 3g — Supporting Domains
|
||||
|
||||
Audit logging, dashboard stats, SSE notifications, stories (Geschichten), and cross-cutting exception handling.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: API Backend — Supporting Domains
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(auditSvc, "AuditService", "Spring Service — @Async", "Writes audit log entries asynchronously via a dedicated TaskExecutor, with transaction-aware logging to prevent deadlocks on concurrent saves.")
|
||||
Component(auditQuery, "AuditLogQueryService", "Spring Service", "Queries audit logs for activity feeds, pulse stats, recent contributors, and per-document history. Facade over AuditLogRepository.")
|
||||
|
||||
Component(dashCtrl, "DashboardController", "Spring MVC — /api/dashboard", "REST endpoints for the user dashboard: recent document resume (/resume), weekly transcription pulse stats (/pulse), and activity feed (/activity) with kind filtering and pagination.")
|
||||
Component(statsCtrl, "StatsController", "Spring MVC — /api/stats", "Returns aggregate counts (total persons, total documents) for the UI stats bar.")
|
||||
Component(statsSvc, "StatsService", "Spring Service", "Queries aggregate counts: total persons and total documents.")
|
||||
Component(dashSvc, "DashboardService", "Spring Service", "Assembles the user dashboard: recent document resume (calls DocumentService + TranscriptionService), weekly transcription pulse stats, and activity feed with contributor avatars.")
|
||||
|
||||
Component(notifCtrl, "NotificationController", "Spring MVC — /api/notifications", "REST and SSE endpoints for notification stream, history with filtering, read/unread state, and per-user preference management.")
|
||||
Component(notifSvc, "NotificationService", "Spring Service", "Creates REPLY and MENTION notifications, optionally sends email, marks as read, and pushes events to connected clients via SseEmitterRegistry.")
|
||||
Component(sseRegistry, "SseEmitterRegistry", "Spring Component", "In-memory ConcurrentHashMap of Spring SseEmitter instances per user. Handles registration, deregistration, and JSON event broadcasts.")
|
||||
|
||||
Component(geschCtrl, "GeschichteController", "Spring MVC — /api/geschichten", "CRUD for publishable stories that link persons and documents. Requires BLOG_WRITE permission for write operations.")
|
||||
Component(geschSvc, "GeschichteService", "Spring Service", "Manages story lifecycle (DRAFT → PUBLISHED with timestamp). Sanitizes HTML body with an allowlist policy.")
|
||||
|
||||
Component(exHandler, "GlobalExceptionHandler", "Spring @RestControllerAdvice", "Converts DomainException, validation errors, and generic exceptions to ErrorResponse JSON with machine-readable ErrorCode and HTTP status.")
|
||||
}
|
||||
|
||||
Component(documentSvc, "DocumentService", "Spring Service", "See diagram 3b. Called by DashboardService to fetch document titles and resume data.")
|
||||
Component(transcriptionSvc, "TranscriptionService", "Spring Service", "See diagram 3c. Called by DashboardService to fetch transcription block progress for resume.")
|
||||
|
||||
Rel(frontend, dashCtrl, "Dashboard requests", "HTTP / JSON")
|
||||
Rel(frontend, statsCtrl, "GET /api/stats", "HTTP / JSON")
|
||||
Rel(frontend, notifCtrl, "Notification stream and history", "HTTP / JSON / SSE")
|
||||
Rel(frontend, geschCtrl, "Story requests", "HTTP / JSON")
|
||||
Rel(dashCtrl, dashSvc, "Delegates to", "")
|
||||
Rel(statsCtrl, statsSvc, "Delegates to", "")
|
||||
Rel(statsSvc, db, "Reads aggregate counts", "JDBC")
|
||||
Rel(dashSvc, auditQuery, "Fetches activity feed and pulse stats", "")
|
||||
Rel(dashSvc, documentSvc, "Fetches document titles and resume data", "")
|
||||
Rel(dashSvc, transcriptionSvc, "Fetches transcription block progress for resume", "")
|
||||
Rel(notifCtrl, notifSvc, "Delegates to", "")
|
||||
Rel(notifCtrl, sseRegistry, "Registers client SSE connection", "")
|
||||
Rel(notifSvc, sseRegistry, "Broadcasts events to connected clients", "")
|
||||
Rel(geschCtrl, geschSvc, "Delegates to", "")
|
||||
Rel(auditSvc, db, "Writes audit_log", "JDBC")
|
||||
Rel(auditQuery, db, "Reads audit_log", "JDBC")
|
||||
Rel(notifSvc, db, "Reads / writes notifications", "JDBC")
|
||||
Rel(geschSvc, db, "Reads / writes geschichten", "JDBC")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Level 3 — Components: Web Frontend
|
||||
|
||||
The internal structure of the SvelteKit frontend.
|
||||
The internal structure of the SvelteKit frontend, split into four focused views.
|
||||
|
||||
### 3a — Middleware, Auth & Layout
|
||||
|
||||
Per-request middleware: session validation, i18n, auth cookie handling, and auth pages.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: Web Frontend
|
||||
title Component Diagram: Web Frontend — Middleware, Auth & Layout
|
||||
|
||||
Person(user, "User")
|
||||
Container(backend, "API Backend", "Spring Boot")
|
||||
|
||||
System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
|
||||
|
||||
Component(hooks, "hooks.server.ts", "SvelteKit Server Hook", "Two responsibilities: (1) userGroup handle — reads auth_token cookie, fetches /api/users/me, stores user in event.locals. (2) handleFetch — intercepts all outgoing fetch() calls, injects Authorization header from cookie. Redirects to /login if token absent.")
|
||||
Component(hooks, "hooks.server.ts", "SvelteKit Server Hook", "Four handle layers: (1) handleAuth — redirects unauthenticated users to /login; (2) userGroup — reads auth_token cookie, fetches /api/users/me, stores user in event.locals; (3) handleFetch — injects Authorization header on all outgoing /api/ calls; (4) handleLocaleDetection — sets language cookie from Accept-Language header.")
|
||||
Component(i18n, "hooks.ts (Paraglide)", "SvelteKit Client Hook", "Client-side i18n middleware. Detects language from URL and sets the active locale for Paraglide.js translation functions.")
|
||||
|
||||
Component(layout, "+layout.server.ts", "SvelteKit Layout Loader", "Passes event.locals.user down to all child pages so every route has access to the authenticated user.")
|
||||
|
||||
Component(homePage, "/ (Home / Search)", "SvelteKit Route", "Loader: parses URL search params (q, from, to, senderId, receiverId, tags), fetches /api/documents/search and /api/persons, returns results. Page: renders search form with full-text, date range, sender/receiver typeahead, tag filters. Displays paginated document list.")
|
||||
Component(docDetail, "/documents/[id]", "SvelteKit Route", "Loader: fetches /api/documents/{id}. Handles 401 redirect to login, 404 error. Page: shows document metadata, file viewer (PDF/image inline), transcription, tags.")
|
||||
Component(docEdit, "/documents/[id]/edit", "SvelteKit Route", "Form with PersonTypeahead for sender/receiver, TagInput for tags, date/location fields. Submits PUT to /api/documents/{id}.")
|
||||
Component(persons, "/persons and /persons/[id]", "SvelteKit Routes", "Lists all persons. Detail page shows person metadata and all documents they sent.")
|
||||
Component(conversations, "/conversations", "SvelteKit Route", "Selects two persons via PersonTypeahead, fetches /api/documents/conversation, displays chronological exchange.")
|
||||
Component(loginPage, "/login", "SvelteKit Route", "Form action: encodes username:password as Base64 Basic Auth token, POSTs to /api/users/me to validate, sets auth_token httpOnly cookie (SameSite=strict, maxAge=86400), redirects to /.")
|
||||
Component(loginPage, "/login", "SvelteKit Route", "Form action: encodes email:password as Base64 Basic Auth token, sends GET /api/users/me to validate, sets auth_token httpOnly cookie (SameSite=strict, maxAge=86400), redirects to /.")
|
||||
Component(logoutPage, "/logout", "SvelteKit Route (server-only)", "Clears the auth_token cookie and redirects to /login.")
|
||||
Component(adminPage, "/admin", "SvelteKit Route", "User management UI (create/delete users). Excel import trigger button (calls /api/admin/trigger-import).")
|
||||
|
||||
Component(apiPersons, "/api/persons (SvelteKit API)", "SvelteKit Server Route", "Proxies GET /api/persons?q=... to backend. Used by PersonTypeahead for typeahead suggestions.")
|
||||
Component(apiTags, "/api/tags (SvelteKit API)", "SvelteKit Server Route", "Proxies GET /api/tags to backend. Used by TagInput for autocomplete.")
|
||||
|
||||
Component(typeahead, "PersonTypeahead.svelte", "Svelte Component", "Async autocomplete for selecting a person. Debounces input, calls /api/persons?q=.")
|
||||
Component(tagInput, "TagInput.svelte", "Svelte Component", "Multi-tag input. Supports free-text entry and selecting existing tags from /api/tags.")
|
||||
Component(registerPage, "/register", "SvelteKit Route", "Loader validates invite code via GET /api/auth/invite/{code}. Form action: POST /api/auth/register to create the user account.")
|
||||
Component(forgotPw, "/forgot-password", "SvelteKit Route", "Form action: POST /api/auth/forgot-password. Always responds with success to prevent email enumeration.")
|
||||
Component(resetPw, "/reset-password", "SvelteKit Route", "Form action: POST /api/auth/reset-password with the token from the query string.")
|
||||
}
|
||||
|
||||
Rel(user, hooks, "Every browser request", "HTTPS")
|
||||
Rel(hooks, backend, "GET /api/users/me (session check)", "HTTP / Basic Auth")
|
||||
Rel(hooks, loginPage, "Redirect if no token", "")
|
||||
|
||||
Rel(layout, homePage, "Provides user context", "")
|
||||
Rel(layout, docDetail, "Provides user context", "")
|
||||
Rel(layout, adminPage, "Provides user context", "")
|
||||
|
||||
Rel(homePage, backend, "GET /api/documents/search", "HTTP / JSON")
|
||||
Rel(homePage, backend, "GET /api/persons", "HTTP / JSON")
|
||||
Rel(docDetail, backend, "GET /api/documents/{id}", "HTTP / JSON")
|
||||
Rel(docDetail, backend, "GET /api/documents/{id}/file", "HTTP / Binary stream")
|
||||
Rel(docEdit, backend, "PUT /api/documents/{id}", "HTTP / Multipart")
|
||||
Rel(conversations, backend, "GET /api/documents/conversation", "HTTP / JSON")
|
||||
Rel(hooks, layout, "Stores authenticated user in event.locals", "")
|
||||
Rel(loginPage, backend, "GET /api/users/me (auth check)", "HTTP / Basic Auth")
|
||||
Rel(adminPage, backend, "GET/POST/DELETE /api/users", "HTTP / JSON")
|
||||
Rel(adminPage, backend, "POST /api/admin/trigger-import", "HTTP / JSON")
|
||||
Rel(registerPage, backend, "GET /api/auth/invite/{code}, POST /api/auth/register", "HTTP / JSON")
|
||||
Rel(forgotPw, backend, "POST /api/auth/forgot-password", "HTTP / JSON")
|
||||
Rel(resetPw, backend, "POST /api/auth/reset-password", "HTTP / JSON")
|
||||
```
|
||||
|
||||
Rel(apiPersons, backend, "GET /api/persons", "HTTP / JSON")
|
||||
Rel(apiTags, backend, "GET /api/tags", "HTTP / JSON")
|
||||
### 3b — Document Workflows
|
||||
|
||||
Document search, viewing, editing, enrichment, and the shared components that support them.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: Web Frontend — Document Workflows
|
||||
|
||||
Person(user, "User")
|
||||
Container(backend, "API Backend", "Spring Boot")
|
||||
|
||||
System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
|
||||
Component(homePage, "/ (Home / Search)", "SvelteKit Route", "Loader: parses URL params (q, from, to, senderId, receiverId, tags), fetches /api/documents/search and /api/persons. Renders search form with full-text, date range, sender/receiver typeahead, and tag filters.")
|
||||
Component(docDetail, "/documents/[id]", "SvelteKit Route", "Loader: GET /api/documents/{id}. Page: metadata panel, inline file viewer, transcription editor, annotation layer, and comment thread.")
|
||||
Component(docEdit, "/documents/[id]/edit", "SvelteKit Route", "Edit form with PersonTypeahead, TagInput, date/location fields. Form action: PUT /api/documents/{id}.")
|
||||
Component(docNew, "/documents/new", "SvelteKit Route", "Upload form for a new document. Loader: GET /api/persons. Form action: POST /api/documents with multipart file.")
|
||||
Component(docBulkEdit, "/documents/bulk-edit", "SvelteKit Route", "Multi-document metadata editor. Loader: GET /api/documents/incomplete. Requires WRITE_ALL (redirects otherwise). Action: PATCH /api/documents/bulk.")
|
||||
Component(enrichPage, "/enrich/[id]", "SvelteKit Route", "Guided enrichment workflow. Loader: GET /api/documents/{id}. Progressively saves annotations and transcription blocks.")
|
||||
|
||||
Component(apiPersons, "/api/persons (SvelteKit API)", "SvelteKit Server Route", "Proxies GET /api/persons?q=... to backend for PersonTypeahead suggestions.")
|
||||
Component(apiTags, "/api/tags (SvelteKit API)", "SvelteKit Server Route", "Proxies GET /api/tags to backend for TagInput autocomplete.")
|
||||
Component(typeahead, "PersonTypeahead.svelte", "Svelte Component", "Async autocomplete for selecting a person. Debounces input, calls /api/persons?q=.")
|
||||
Component(tagInput, "TagInput.svelte", "Svelte Component", "Multi-tag input. Supports free-text entry and selecting existing tags from /api/tags.")
|
||||
}
|
||||
|
||||
Rel(user, homePage, "Searches and browses", "HTTPS / Browser")
|
||||
Rel(homePage, backend, "GET /api/documents/search, GET /api/persons", "HTTP / JSON")
|
||||
Rel(docDetail, backend, "GET /api/documents/{id}, GET /api/documents/{id}/file", "HTTP / JSON + Binary")
|
||||
Rel(docEdit, backend, "PUT /api/documents/{id}", "HTTP / Multipart")
|
||||
Rel(docNew, backend, "GET /api/persons, POST /api/documents", "HTTP / JSON + Multipart")
|
||||
Rel(docBulkEdit, backend, "GET /api/documents/incomplete, PATCH /api/documents/bulk", "HTTP / JSON")
|
||||
Rel(enrichPage, backend, "GET/POST /api/transcription, POST /api/documents/{id}/annotations", "HTTP / JSON")
|
||||
Rel(homePage, typeahead, "Uses for sender/receiver filter", "")
|
||||
Rel(docEdit, typeahead, "Uses for sender/receiver selection", "")
|
||||
Rel(docNew, typeahead, "Uses for sender selection", "")
|
||||
Rel(docEdit, tagInput, "Uses for tag management", "")
|
||||
Rel(typeahead, apiPersons, "Fetches suggestions", "HTTP")
|
||||
Rel(tagInput, apiTags, "Fetches existing tags", "HTTP")
|
||||
Rel(apiPersons, backend, "GET /api/persons", "HTTP / JSON")
|
||||
Rel(apiTags, backend, "GET /api/tags", "HTTP / JSON")
|
||||
```
|
||||
|
||||
### 3c — People, Stories & Discovery
|
||||
|
||||
Person directory, bilateral conversations, activity feed, stories, family tree, and user profiles.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: Web Frontend — People, Stories & Discovery
|
||||
|
||||
Person(user, "User")
|
||||
Container(backend, "API Backend", "Spring Boot")
|
||||
|
||||
System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
|
||||
Component(personsPage, "/persons and /persons/[id]", "SvelteKit Routes", "Person directory and detail. Detail: metadata, document list sent/received, correspondents, explicit and inferred family relationships.")
|
||||
Component(personEdit, "/persons/[id]/edit and /persons/new", "SvelteKit Routes", "Create and edit person forms. Edit: metadata, aliases, explicit relationships. Actions: PUT/POST /api/persons.")
|
||||
Component(briefwechsel, "/briefwechsel", "SvelteKit Route", "Bilateral conversation timeline. Selects two persons via PersonTypeahead, fetches GET /api/documents/conversation, displays chronological exchange.")
|
||||
Component(aktivitaeten, "/aktivitaeten", "SvelteKit Route", "Unified activity feed (Chronik). Loader: GET /api/dashboard/activity and GET /api/notifications?read=false.")
|
||||
Component(geschichten, "/geschichten and /geschichten/[id]", "SvelteKit Routes", "Story list and detail pages. Loader: GET /api/geschichten?status=PUBLISHED.")
|
||||
Component(geschichtenEdit, "/geschichten/[id]/edit and /geschichten/new", "SvelteKit Routes", "Story editor with rich text, person and document linking. Actions: PUT/POST /api/geschichten. Requires BLOG_WRITE permission.")
|
||||
Component(stammbaum, "/stammbaum", "SvelteKit Route", "Family tree visualisation. Loader: GET /api/network (nodes + edges). Renders interactive family tree from network graph data.")
|
||||
Component(profilePage, "/profile", "SvelteKit Route", "Current user profile settings. Loader: GET /api/users/me/notification-preferences. Actions: update name/password and notification preferences.")
|
||||
Component(userProfile, "/users/[id]", "SvelteKit Route", "Public user profile view. Loader: GET /api/users/{id}.")
|
||||
}
|
||||
|
||||
Rel(user, personsPage, "Browses family members", "HTTPS / Browser")
|
||||
Rel(personsPage, backend, "GET /api/persons, GET /api/persons/{id}", "HTTP / JSON")
|
||||
Rel(personEdit, backend, "GET /api/persons/{id}, PUT /api/persons/{id}, POST /api/persons", "HTTP / JSON")
|
||||
Rel(briefwechsel, backend, "GET /api/documents/conversation", "HTTP / JSON")
|
||||
Rel(aktivitaeten, backend, "GET /api/dashboard/activity, GET /api/notifications", "HTTP / JSON")
|
||||
Rel(geschichten, backend, "GET /api/geschichten", "HTTP / JSON")
|
||||
Rel(geschichtenEdit, backend, "GET/PUT/POST /api/geschichten", "HTTP / JSON")
|
||||
Rel(stammbaum, backend, "GET /api/network", "HTTP / JSON")
|
||||
Rel(profilePage, backend, "GET/PUT /api/users/me, notification-preferences", "HTTP / JSON")
|
||||
Rel(userProfile, backend, "GET /api/users/{id}", "HTTP / JSON")
|
||||
```
|
||||
|
||||
### 3d — Administration & Help
|
||||
|
||||
Admin panel sub-routes and the transcription help guide.
|
||||
|
||||
```mermaid
|
||||
C4Component
|
||||
title Component Diagram: Web Frontend — Administration & Help
|
||||
|
||||
Person(admin, "Administrator")
|
||||
Person(user, "User")
|
||||
Container(backend, "API Backend", "Spring Boot")
|
||||
|
||||
System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
|
||||
Component(adminUsers, "/admin/users, /admin/users/[id], /admin/users/new, /admin/invites", "SvelteKit Routes", "User directory, create/update/delete users, and manage invite codes. Requires ADMIN_USER permission.")
|
||||
Component(adminGroups, "/admin/groups, /admin/groups/[id], /admin/groups/new", "SvelteKit Routes", "Permission group management: create/edit groups and their permission sets.")
|
||||
Component(adminTags, "/admin/tags and /admin/tags/[id]", "SvelteKit Routes", "Tag administration: edit tag hierarchy, merge tags, delete subtrees.")
|
||||
Component(adminOcr, "/admin/ocr and /admin/ocr/[personId]", "SvelteKit Routes", "Global and per-person OCR configuration. Manages script types and triggers sender model training.")
|
||||
Component(adminSystem, "/admin/system", "SvelteKit Route", "System status panel. Triggers Excel/ODS mass import (POST /api/admin/trigger-import). Displays import state.")
|
||||
Component(hilfe, "/hilfe/transkription", "SvelteKit Route", "Static transcription style guide for Kurrent and Sütterlin character recognition. No backend calls.")
|
||||
}
|
||||
|
||||
Rel(admin, adminUsers, "Manages users and invites", "HTTPS / Browser")
|
||||
Rel(user, hilfe, "Views transcription style guide", "HTTPS / Browser")
|
||||
Rel(adminUsers, backend, "GET/POST/DELETE /api/users, POST /api/auth/invite", "HTTP / JSON")
|
||||
Rel(adminGroups, backend, "GET/POST/PUT/DELETE /api/groups", "HTTP / JSON")
|
||||
Rel(adminTags, backend, "GET/PUT/DELETE /api/tags", "HTTP / JSON")
|
||||
Rel(adminOcr, backend, "GET/POST /api/ocr (global config and sender training)", "HTTP / JSON")
|
||||
Rel(adminSystem, backend, "POST /api/admin/trigger-import, GET /api/admin/import-status", "HTTP / JSON")
|
||||
```
|
||||
|
||||
---
|
||||
@@ -211,12 +519,12 @@ sequenceDiagram
|
||||
participant Backend as Backend (Spring Boot)
|
||||
participant DB as PostgreSQL
|
||||
|
||||
User->>Browser: Enter username + password
|
||||
User->>Browser: Enter email + password
|
||||
Browser->>Frontend: POST /login (form action)
|
||||
Frontend->>Frontend: Base64 encode "user:password"
|
||||
Frontend->>Frontend: Base64 encode "email:password"
|
||||
Frontend->>Backend: GET /api/users/me<br/>Authorization: Basic <token>
|
||||
Backend->>Backend: Spring Security parses Basic Auth
|
||||
Backend->>DB: SELECT user WHERE username=?
|
||||
Backend->>DB: SELECT user WHERE email=?
|
||||
DB-->>Backend: AppUser + groups + permissions
|
||||
Backend->>Backend: BCrypt.matches(password, hash)
|
||||
Backend-->>Frontend: 200 OK — UserDTO
|
||||
@@ -264,3 +572,14 @@ sequenceDiagram
|
||||
Backend-->>Frontend: 200 OK — Document JSON
|
||||
Frontend-->>User: Refreshed document view
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Database
|
||||
|
||||
Entity-relationship and full column reference for the PostgreSQL schema (30 tables, 7 domain groups). Source files in `docs/architecture/db/`.
|
||||
|
||||
- **[db-relationships.puml](db/db-relationships.puml)** — Entity relationships: all tables and foreign-key connections, grouped by domain. Start here for an overview.
|
||||
- **[db-orm.puml](db/db-orm.puml)** — Full schema reference: all columns and types for all 30 tables. Use this when mapping Java entities to database columns.
|
||||
|
||||
> Schema as of Flyway V60 (2026-05-06). Open in VS Code with the PlantUML extension (server: `http://heim-nas:8500`).
|
||||
|
||||
39
docs/architecture/c4/README.md
Normal file
39
docs/architecture/c4/README.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# C4-PlantUML Diagrams
|
||||
|
||||
Architecture diagrams in C4-PlantUML format. These are the authoritative source for layout-accurate diagrams. The companion `c4-diagrams.md` in the parent directory keeps Mermaid versions for inline Gitea rendering.
|
||||
|
||||
## Render in Gitea
|
||||
|
||||
Gitea is configured to render `.puml` files as diagrams. Open any `.puml` file in the Gitea UI to see the rendered diagram.
|
||||
|
||||
> **Note:** `plantuml` code fences inside Markdown files do **not** render inline in Gitea — this is a Gitea limitation unrelated to the server configuration. The `.md` files in this repo use Mermaid for that reason.
|
||||
|
||||
## Render in VS Code
|
||||
|
||||
Install the [PlantUML extension](https://marketplace.visualstudio.com/items?itemName=jebbs.plantuml) (`jebbs.plantuml`). The project's `.vscode/settings.json` already points at the shared server:
|
||||
|
||||
```
|
||||
plantuml.server = http://heim-nas:8500
|
||||
```
|
||||
|
||||
Open any `.puml` file and press `Alt+D` to preview.
|
||||
|
||||
## Files
|
||||
|
||||
| File | Diagram |
|
||||
|---|---|
|
||||
| `l1-context.puml` | Level 1 — System Context |
|
||||
| `l2-containers.puml` | Level 2 — Containers |
|
||||
| `l3-backend-3a-security.puml` | L3 Backend: Security & Authentication |
|
||||
| `l3-backend-3b-document-management.puml` | L3 Backend: Document Management & Import |
|
||||
| `l3-backend-3c-transcription.puml` | L3 Backend: Document Transcription Pipeline |
|
||||
| `l3-backend-3d-users-groups.puml` | L3 Backend: Users, Groups & Administration |
|
||||
| `l3-backend-3e-persons.puml` | L3 Backend: Persons & Family Graph |
|
||||
| `l3-backend-3f-ocr.puml` | L3 Backend: OCR Orchestration |
|
||||
| `l3-backend-3g-supporting.puml` | L3 Backend: Supporting Domains |
|
||||
| `l3-frontend-3a-middleware-auth.puml` | L3 Frontend: Middleware, Auth & Layout |
|
||||
| `l3-frontend-3b-document-workflows.puml` | L3 Frontend: Document Workflows |
|
||||
| `l3-frontend-3c-people-stories.puml` | L3 Frontend: People, Stories & Discovery |
|
||||
| `l3-frontend-3d-administration.puml` | L3 Frontend: Administration & Help |
|
||||
| `seq-auth-flow.puml` | Sequence: Authentication Flow |
|
||||
| `seq-document-upload.puml` | Sequence: Document Upload Flow |
|
||||
16
docs/architecture/c4/l1-context.puml
Normal file
16
docs/architecture/c4/l1-context.puml
Normal file
@@ -0,0 +1,16 @@
|
||||
@startuml
|
||||
!include <C4/C4_Context>
|
||||
|
||||
title System Context: Familienarchiv
|
||||
|
||||
Person(admin, "Administrator", "Manages users, triggers bulk imports, reviews and transcribes documents")
|
||||
Person(member, "Family Member", "Access by administrator invite. Searches, browses, reads, and transcribes archived documents.")
|
||||
|
||||
System(familienarchiv, "Familienarchiv", "Web application for digitising, organising, and searching family documents")
|
||||
System_Ext(mail, "Email Service", "SMTP server. Delivers notification emails (mentions, replies) and password-reset links.")
|
||||
|
||||
Rel(admin, familienarchiv, "Manages via browser", "HTTPS")
|
||||
Rel(member, familienarchiv, "Searches, reads, and transcribes via browser", "HTTPS")
|
||||
Rel(familienarchiv, mail, "Sends notification and password-reset emails (optional)", "SMTP")
|
||||
|
||||
@enduml
|
||||
32
docs/architecture/c4/l2-containers.puml
Normal file
32
docs/architecture/c4/l2-containers.puml
Normal file
@@ -0,0 +1,32 @@
|
||||
@startuml
|
||||
!include <C4/C4_Container>
|
||||
|
||||
title Container Diagram: Familienarchiv
|
||||
|
||||
Person(user, "User", "Admin or family member")
|
||||
System_Ext(mail, "Email Service", "SMTP server. Delivers notification and password-reset emails.")
|
||||
|
||||
Container(caddy, "Reverse Proxy", "Caddy 2 (host-installed)", "TLS termination (auto Let's Encrypt). Routes /api/* to backend:8080, everything else to frontend:3000. Responds 404 on /actuator/* and adds HSTS, X-Content-Type-Options, Referrer-Policy headers.")
|
||||
|
||||
System_Boundary(archiv, "Familienarchiv (Docker Compose)") {
|
||||
Container(frontend, "Web Frontend", "SvelteKit / Node adapter / port 3000", "Server-side rendered UI. Handles auth session cookies, document search and viewer, transcription editor, annotation layer, family tree (Stammbaum), stories (Geschichten), activity feed (Chronik), enrichment workflow, and admin panel.")
|
||||
Container(backend, "API Backend", "Spring Boot 4 / Java 21 / Jetty / port 8080", "REST API. Implements document management, search, user auth, file upload/download, transcription, OCR orchestration, and SSE notifications. Trusts X-Forwarded-* headers from Caddy.")
|
||||
Container(ocr, "OCR Service", "Python FastAPI / port 8000", "Handwritten text recognition (HTR) and OCR microservice. Single-node by design — see ADR-001. Reachable only on the internal Docker network; no external port exposed.")
|
||||
ContainerDb(db, "Relational Database", "PostgreSQL 16", "Stores document metadata, persons, users, permission groups, tags, transcription blocks, audit log, and Spring Session data.")
|
||||
ContainerDb(storage, "Object Storage", "MinIO (S3-compatible)", "Stores the actual document files (PDFs, scans). Backend uses a bucket-scoped service account (archiv-app), not MinIO root.")
|
||||
Container(mc, "Bucket / Service-Account Init", "MinIO Client (mc)", "One-shot container on startup. Idempotent: creates the archive bucket, the archiv-app service account, and attaches the readwrite policy.")
|
||||
}
|
||||
|
||||
Rel(user, caddy, "HTTPS", "TLS 1.2/1.3")
|
||||
Rel(caddy, frontend, "Reverse proxies non-/api requests", "HTTP / loopback:3000")
|
||||
Rel(caddy, backend, "Reverse proxies /api/*", "HTTP / loopback:8080")
|
||||
Rel(frontend, backend, "API requests with Basic Auth token", "HTTP / REST / JSON")
|
||||
Rel(backend, user, "SSE notifications (server-sent events)", "HTTP / SSE — fronted by Caddy")
|
||||
Rel(backend, db, "Reads and writes metadata and sessions", "JDBC / SQL")
|
||||
Rel(backend, storage, "Uploads and streams document files using archiv-app service account", "HTTP / S3 API (AWS SDK v2)")
|
||||
Rel(backend, ocr, "OCR job requests with presigned MinIO URL", "HTTP / REST / JSON")
|
||||
Rel(backend, mail, "Sends notification and password-reset emails (optional)", "SMTP")
|
||||
Rel(ocr, storage, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
|
||||
Rel(mc, storage, "Bootstraps bucket + service account on startup", "MinIO Client CLI")
|
||||
|
||||
@enduml
|
||||
21
docs/architecture/c4/l3-backend-3a-security.puml
Normal file
21
docs/architecture/c4/l3-backend-3a-security.puml
Normal file
@@ -0,0 +1,21 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — Security & Authentication
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(secFilter, "Security Filter Chain", "Spring Security", "Enforces authentication on all requests. Parses Basic Auth header and constructs an Authentication token; delegates credential validation to DaoAuthenticationProvider via BCrypt. Permits password-reset, invite, and register endpoints without authentication.")
|
||||
Component(permAspect, "PermissionAspect", "Spring AOP", "Intercepts methods annotated with @RequirePermission. Checks user's granted authorities against the required permission. Throws 401/403 if denied.")
|
||||
Component(secConf, "SecurityConfig", "Spring @Configuration", "Configures filter chain: all routes require authentication, CSRF disabled, BCrypt password encoder, DaoAuthenticationProvider with CustomUserDetailsService.")
|
||||
Component(userDetails, "CustomUserDetailsService", "Spring Security UserDetailsService", "Loads AppUser by email from DB. Converts group permissions to Spring GrantedAuthority objects. Logs unknown permissions.")
|
||||
}
|
||||
|
||||
Rel(frontend, secFilter, "All requests", "HTTP / Basic Auth header")
|
||||
Rel(secFilter, permAspect, "Authenticated requests reach guarded service methods")
|
||||
Rel(secConf, userDetails, "Wires as UserDetailsService")
|
||||
Rel(userDetails, db, "Loads user by email", "JDBC")
|
||||
|
||||
@enduml
|
||||
40
docs/architecture/c4/l3-backend-3b-document-management.puml
Normal file
40
docs/architecture/c4/l3-backend-3b-document-management.puml
Normal file
@@ -0,0 +1,40 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — Document Management & Import
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
ContainerDb(minio, "Object Storage", "MinIO (S3-compatible)")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(docCtrl, "DocumentController", "Spring MVC — /api/documents", "CRUD for documents: search, get by ID, update metadata, upload/download file, conversation thread, batch metadata updates, and per-month density aggregation for the timeline filter widget.")
|
||||
Component(adminCtrl, "AdminController", "Spring MVC — /api/admin", "Triggers asynchronous Excel/ODS mass import (requires ADMIN permission). Reports import state (IDLE/RUNNING/DONE/FAILED).")
|
||||
Component(docSvc, "DocumentService", "Spring Service", "Core document business logic: store, update, search. Resolves persons and tags, delegates file I/O to FileService, builds dynamic JPA Specifications, and integrates with audit logging.")
|
||||
Component(fileSvc, "FileService", "Spring Service", "Wraps AWS SDK v2 S3Client. Uploads files with UUID-keyed paths, computes SHA-256 hash, downloads with content-type detection, and generates presigned URLs for OCR access.")
|
||||
Component(massImport, "MassImportService", "Spring Service — @Async", "Reads Excel/ODS files from /import mount. Tracks import state (IDLE/RUNNING/DONE/FAILED) and delegates to ExcelService. Returns immediately; processing runs asynchronously.")
|
||||
Component(excelSvc, "ExcelService", "Spring Service", "Parses Excel/ODS workbooks (Apache POI). Column indices configurable via application.properties. Creates/updates document records per row.")
|
||||
Component(minioConf, "MinioConfig", "Spring @Configuration", "Creates the S3Client and S3Presigner beans with path-style access for MinIO. Validates MinIO connectivity on startup.")
|
||||
Component(docRepo, "DocumentRepository", "Spring Data JPA", "Queries documents with Specification-based dynamic search, bidirectional conversation thread queries, full-text search with ranking and match highlighting, and transcription pipeline queue projections.")
|
||||
Component(docSpec, "DocumentSpecifications", "JPA Criteria API", "Factory for composable predicates: hasText (full-text), hasSender, hasReceiver, isBetween (date range), hasTags (subquery AND/OR logic).")
|
||||
}
|
||||
|
||||
Component(personSvc, "PersonService", "Spring Service", "See diagram 3e. Called by DocumentService to resolve sender / receiver persons by ID.")
|
||||
Component(tagSvc, "TagService", "Spring Service", "See diagram 3d. Called by DocumentService to find or create tags by name.")
|
||||
|
||||
Rel(frontend, docCtrl, "Document requests", "HTTP / JSON")
|
||||
Rel(frontend, adminCtrl, "Trigger import", "HTTP / JSON")
|
||||
Rel(docCtrl, docSvc, "Delegates to")
|
||||
Rel(adminCtrl, massImport, "Triggers")
|
||||
Rel(docSvc, fileSvc, "Upload / download files")
|
||||
Rel(docSvc, docRepo, "Reads / writes documents")
|
||||
Rel(docSvc, docSpec, "Builds search predicates")
|
||||
Rel(docSvc, personSvc, "Resolves sender / receivers")
|
||||
Rel(docSvc, tagSvc, "Finds or creates tags")
|
||||
Rel(massImport, excelSvc, "Parses Excel/ODS file")
|
||||
Rel(excelSvc, docSvc, "Creates / updates documents")
|
||||
Rel(minioConf, fileSvc, "Provides S3Client and S3Presigner beans")
|
||||
Rel(fileSvc, minio, "PUT / GET / presigned URL objects", "S3 API / HTTP")
|
||||
Rel(docRepo, db, "SQL queries", "JDBC")
|
||||
|
||||
@enduml
|
||||
41
docs/architecture/c4/l3-backend-3c-transcription.puml
Normal file
41
docs/architecture/c4/l3-backend-3c-transcription.puml
Normal file
@@ -0,0 +1,41 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — Document Transcription Pipeline
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(transcriptionCtrl, "TranscriptionBlockController", "Spring MVC — /api/transcription", "CRUD for transcription text blocks per document page. Manages sort order, review status, and block version history.")
|
||||
Component(annotationCtrl, "AnnotationController", "Spring MVC — /api/documents/{id}/annotations", "CRUD for free-form page annotations with polygon coordinates, colour coding, and file-hash tracking.")
|
||||
Component(commentCtrl, "CommentController", "Spring MVC — /api/documents/{id}/comments", "Threaded comment CRUD on transcription blocks with @mention support and notification triggers.")
|
||||
Component(transcriptionSvc, "TranscriptionService", "Spring Service", "Creates and updates transcription blocks from annotation regions. Tracks block versions, sanitizes text with an HTML allow-list, and triggers mentions.")
|
||||
Component(transcriptionQueueSvc, "TranscriptionQueueService", "Spring Service", "Assembles segmentation, transcription, and review queue projections by delegating to DocumentService and AuditLogQueryService.")
|
||||
Component(annotationSvc, "AnnotationService", "Spring Service", "Manages document page annotations with polygon coordinates. Called by OcrAsyncRunner to persist OCR-generated block boundaries.")
|
||||
Component(commentSvc, "CommentService", "Spring Service", "Creates and manages threaded comments with @mention parsing. Triggers NotificationService for REPLY and MENTION events.")
|
||||
Component(blockRepo, "TranscriptionBlockRepository", "Spring Data JPA", "Reads and writes TranscriptionBlock and TranscriptionBlockVersion records.")
|
||||
Component(annotationRepo, "AnnotationRepository", "Spring Data JPA", "Reads and writes DocumentAnnotation records.")
|
||||
Component(commentRepo, "CommentRepository", "Spring Data JPA", "Reads and writes DocumentComment records.")
|
||||
}
|
||||
|
||||
Component(documentSvc, "DocumentService", "Spring Service", "See diagram 3b. Called by TranscriptionQueueService to assemble pipeline queue projections.")
|
||||
Component(auditQuerySvc, "AuditLogQueryService", "Spring Service", "See diagram 3g. Called by TranscriptionQueueService for pipeline activity data.")
|
||||
|
||||
Rel(frontend, transcriptionCtrl, "Transcription block requests", "HTTP / JSON")
|
||||
Rel(frontend, annotationCtrl, "Annotation requests", "HTTP / JSON")
|
||||
Rel(frontend, commentCtrl, "Comment requests", "HTTP / JSON")
|
||||
Rel(transcriptionCtrl, transcriptionSvc, "Delegates to")
|
||||
Rel(transcriptionCtrl, transcriptionQueueSvc, "Queries pipeline queues")
|
||||
Rel(annotationCtrl, annotationSvc, "Delegates to")
|
||||
Rel(commentCtrl, commentSvc, "Delegates to")
|
||||
Rel(transcriptionSvc, blockRepo, "Reads / writes blocks and versions")
|
||||
Rel(annotationSvc, annotationRepo, "Reads / writes annotations")
|
||||
Rel(commentSvc, commentRepo, "Reads / writes comments")
|
||||
Rel(transcriptionQueueSvc, documentSvc, "Queries pipeline document state")
|
||||
Rel(transcriptionQueueSvc, auditQuerySvc, "Queries pipeline activity data")
|
||||
Rel(blockRepo, db, "SQL queries", "JDBC")
|
||||
Rel(annotationRepo, db, "SQL queries", "JDBC")
|
||||
Rel(commentRepo, db, "SQL queries", "JDBC")
|
||||
|
||||
@enduml
|
||||
41
docs/architecture/c4/l3-backend-3d-users-groups.puml
Normal file
41
docs/architecture/c4/l3-backend-3d-users-groups.puml
Normal file
@@ -0,0 +1,41 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — Users, Groups & Administration
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(userCtrl, "UserController", "Spring MVC — /api/users", "Returns current user (/me), creates and deletes users (requires ADMIN_USER), supports user search and profile updates.")
|
||||
Component(groupCtrl, "GroupController", "Spring MVC — /api/groups", "Lists and manages permission groups.")
|
||||
Component(tagCtrl, "TagController", "Spring MVC — /api/tags", "Lists tags for typeahead, supports tag merge, tree structure, and subtree deletion.")
|
||||
Component(inviteCtrl, "InviteController", "Spring MVC — /api/auth/invite", "Creates invite codes and validates them at registration time. Rate-limited via WebConfig interceptor.")
|
||||
Component(authCtrl, "AuthController", "Spring MVC — /api/auth", "Handles user registration (POST /register) and password reset token endpoints (/forgot-password, /reset-password).")
|
||||
Component(userSvc, "UserService", "Spring Service", "User CRUD with BCrypt password encoding, group assignment, and audit logging. Orchestrates invite-based registration and password reset tokens.")
|
||||
Component(tagSvc, "TagService", "Spring Service", "Tag CRUD with name search, hierarchical tree structure, merge/reparent operations, and recursive subtree deletion.")
|
||||
Component(dataInit, "DataInitializer", "CommandLineRunner", "On startup: creates default admin user and groups if none exist. Seeds test data if DB is empty.")
|
||||
Component(userRepo, "AppUserRepository", "Spring Data JPA", "Finds users by email. Supports search by email or display name.")
|
||||
Component(groupRepo, "UserGroupRepository", "Spring Data JPA", "Manages permission groups.")
|
||||
Component(tagRepo, "TagRepository", "Spring Data JPA", "Finds or creates tags by name (case-insensitive). Supports recursive ancestor/descendant CTE queries and merge/reparent helpers.")
|
||||
}
|
||||
|
||||
Rel(frontend, userCtrl, "User requests", "HTTP / JSON")
|
||||
Rel(frontend, groupCtrl, "Group requests", "HTTP / JSON")
|
||||
Rel(frontend, tagCtrl, "Tag requests", "HTTP / JSON")
|
||||
Rel(frontend, inviteCtrl, "Invite validation", "HTTP / JSON")
|
||||
Rel(frontend, authCtrl, "Registration and password reset", "HTTP / JSON")
|
||||
Rel(userCtrl, userSvc, "Delegates to")
|
||||
Rel(groupCtrl, userSvc, "Delegates to")
|
||||
Rel(tagCtrl, tagSvc, "Delegates to")
|
||||
Rel(tagSvc, tagRepo, "Reads / writes tags")
|
||||
Rel(inviteCtrl, userSvc, "Creates and validates invites")
|
||||
Rel(authCtrl, userSvc, "Registers users, resets passwords")
|
||||
Rel(userSvc, userRepo, "Reads / writes users")
|
||||
Rel(userSvc, groupRepo, "Assigns groups")
|
||||
Rel(dataInit, db, "Seeds initial data", "JDBC")
|
||||
Rel(userRepo, db, "SQL queries", "JDBC")
|
||||
Rel(groupRepo, db, "SQL queries", "JDBC")
|
||||
Rel(tagRepo, db, "SQL queries", "JDBC")
|
||||
|
||||
@enduml
|
||||
30
docs/architecture/c4/l3-backend-3e-persons.puml
Normal file
30
docs/architecture/c4/l3-backend-3e-persons.puml
Normal file
@@ -0,0 +1,30 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — Persons & Family Graph
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(personCtrl, "PersonController", "Spring MVC — /api/persons", "Lists and searches family members. Returns documents sent by or received by a person, correspondent suggestions, and person summary with document counts.")
|
||||
Component(relCtrl, "RelationshipController", "Spring MVC — /api/network, /api/persons/{id}/relationships", "CRUD for explicit person relationships and the full family network graph (nodes + edges) used by the Stammbaum view.")
|
||||
Component(personSvc, "PersonService", "Spring Service", "Person CRUD, alias management, and merge operations (reassigns all document sender/receiver references before deleting duplicate persons).")
|
||||
Component(relSvc, "RelationshipService", "Spring Service", "Manages explicit directional family relationships (PARENT_OF, SPOUSE_OF, SIBLING_OF, etc.) with optional date ranges and notes.")
|
||||
Component(relInference, "RelationshipInferenceService", "Spring Service", "Computes transitive family relationships from explicit edges to infer grandparent/grandchild, aunt/uncle, and other extended-family links for the network graph.")
|
||||
Component(personRepo, "PersonRepository", "Spring Data JPA", "Queries persons with name search (including aliases), correspondent discovery, person summaries with document counts, and merge/reassignment helpers.")
|
||||
Component(relRepo, "PersonRelationshipRepository", "Spring Data JPA", "Reads and writes PersonRelationship records. Supports lookup by person ID, by relation type, and existence checks for deduplication.")
|
||||
}
|
||||
|
||||
Rel(frontend, personCtrl, "Person requests", "HTTP / JSON")
|
||||
Rel(frontend, relCtrl, "Relationship and graph requests", "HTTP / JSON")
|
||||
Rel(personCtrl, personSvc, "Delegates to")
|
||||
Rel(relCtrl, relSvc, "Delegates to")
|
||||
Rel(relCtrl, relInference, "Queries inferred graph")
|
||||
Rel(personSvc, personRepo, "Reads / writes persons")
|
||||
Rel(relSvc, relRepo, "Reads / writes relationships")
|
||||
Rel(relInference, relRepo, "Reads relationships for inference")
|
||||
Rel(personRepo, db, "SQL queries", "JDBC")
|
||||
Rel(relRepo, db, "SQL queries", "JDBC")
|
||||
|
||||
@enduml
|
||||
41
docs/architecture/c4/l3-backend-3f-ocr.puml
Normal file
41
docs/architecture/c4/l3-backend-3f-ocr.puml
Normal file
@@ -0,0 +1,41 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — OCR Orchestration
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
ContainerDb(minio, "Object Storage", "MinIO (S3-compatible)")
|
||||
Container(ocrPy, "OCR Service", "Python FastAPI")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(ocrCtrl, "OcrController", "Spring MVC — /api/ocr", "REST entry point: trigger single or batch OCR jobs, stream progress via SSE, query job status, and manage training runs and per-sender models.")
|
||||
Component(ocrSvc, "OcrService", "Spring Service", "Creates OcrJob and OcrJobDocument records, checks Python service health, and delegates async execution to OcrAsyncRunner.")
|
||||
Component(ocrBatch, "OcrBatchService", "Spring Service", "Orchestrates multi-document OCR jobs, iterating documents and delegating each to OcrAsyncRunner.")
|
||||
Component(ocrAsync, "OcrAsyncRunner", "Spring Component — @Async", "Async worker that streams OCR results from Python page by page, persists transcription blocks and annotations via domain services, and emits progress via SSE.")
|
||||
Component(ocrClient, "RestClientOcrClient", "Spring Component", "HTTP client wrapping the Python service: POST /ocr/stream (NDJSON), /train, /segtrain, and /train-sender. Falls back from streaming to batch on 404.")
|
||||
Component(ocrTraining, "OcrTrainingService", "Spring Service", "Orchestrates model training: exports training data as ZIP, calls Python /train or /segtrain, persists training metrics in OcrTrainingRunRepository.")
|
||||
Component(ocrJobRepo, "OcrJobRepository, OcrJobDocumentRepository", "Spring Data JPA", "Reads and writes OcrJob and OcrJobDocument records. Tracks job status (RUNNING/DONE/FAILED), per-document progress, page counts, and error messages.")
|
||||
}
|
||||
|
||||
Component(transcriptionSvc, "TranscriptionService", "Spring Service", "See diagram 3c. Called by OcrAsyncRunner to persist transcription blocks per page.")
|
||||
Component(annotationSvc, "AnnotationService", "Spring Service", "See diagram 3c. Called by OcrAsyncRunner to persist OCR-generated annotation regions per page.")
|
||||
|
||||
Rel(frontend, ocrCtrl, "OCR trigger, status, and progress requests", "HTTP / JSON / SSE")
|
||||
Rel(ocrCtrl, ocrSvc, "Single-document jobs")
|
||||
Rel(ocrCtrl, ocrBatch, "Batch jobs")
|
||||
Rel(ocrCtrl, ocrTraining, "Training runs")
|
||||
Rel(ocrSvc, ocrAsync, "Delegates async execution")
|
||||
Rel(ocrBatch, ocrAsync, "Delegates async execution")
|
||||
Rel(ocrAsync, ocrClient, "Streams OCR results page by page", "HTTP / NDJSON")
|
||||
Rel(ocrTraining, ocrClient, "Sends training data ZIP", "HTTP / multipart")
|
||||
Rel(ocrClient, ocrPy, "POST /ocr/stream, /train, /segtrain, /train-sender", "HTTP / REST")
|
||||
Rel(ocrAsync, transcriptionSvc, "Saves transcription blocks per page")
|
||||
Rel(ocrAsync, annotationSvc, "Saves annotation regions per page")
|
||||
Rel(ocrAsync, ocrJobRepo, "Reads / writes OCR job state")
|
||||
Rel(ocrJobRepo, db, "SQL queries", "JDBC")
|
||||
Rel(ocrAsync, minio, "Generates presigned URLs for PDF fetch", "S3 API")
|
||||
Rel(ocrPy, minio, "Fetches PDF via presigned URL", "HTTP / S3 presigned")
|
||||
Rel(ocrTraining, db, "Persists training run metrics", "JDBC")
|
||||
|
||||
@enduml
|
||||
46
docs/architecture/c4/l3-backend-3g-supporting.puml
Normal file
46
docs/architecture/c4/l3-backend-3g-supporting.puml
Normal file
@@ -0,0 +1,46 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: API Backend — Supporting Domains
|
||||
|
||||
Container(frontend, "Web Frontend", "SvelteKit")
|
||||
ContainerDb(db, "PostgreSQL", "PostgreSQL 16")
|
||||
|
||||
System_Boundary(backend, "API Backend (Spring Boot)") {
|
||||
Component(auditSvc, "AuditService", "Spring Service — @Async", "Writes audit log entries asynchronously via a dedicated TaskExecutor, with transaction-aware logging to prevent deadlocks on concurrent saves.")
|
||||
Component(auditQuery, "AuditLogQueryService", "Spring Service", "Queries audit logs for activity feeds, pulse stats, recent contributors, and per-document history. Facade over AuditLogRepository.")
|
||||
Component(dashCtrl, "DashboardController", "Spring MVC — /api/dashboard", "REST endpoints for the user dashboard: recent document resume (/resume), weekly transcription pulse stats (/pulse), and activity feed (/activity) with kind filtering and pagination.")
|
||||
Component(statsCtrl, "StatsController", "Spring MVC — /api/stats", "Returns aggregate counts (total persons, total documents) for the UI stats bar.")
|
||||
Component(statsSvc, "StatsService", "Spring Service", "Queries aggregate counts: total persons and total documents.")
|
||||
Component(dashSvc, "DashboardService", "Spring Service", "Assembles the user dashboard: recent document resume (calls DocumentService + TranscriptionService), weekly transcription pulse stats, and activity feed with contributor avatars.")
|
||||
Component(notifCtrl, "NotificationController", "Spring MVC — /api/notifications", "REST and SSE endpoints for notification stream, history with filtering, read/unread state, and per-user preference management.")
|
||||
Component(notifSvc, "NotificationService", "Spring Service", "Creates REPLY and MENTION notifications, optionally sends email, marks as read, and pushes events to connected clients via SseEmitterRegistry.")
|
||||
Component(sseRegistry, "SseEmitterRegistry", "Spring Component", "In-memory ConcurrentHashMap of Spring SseEmitter instances per user. Handles registration, deregistration, and JSON event broadcasts.")
|
||||
Component(geschCtrl, "GeschichteController", "Spring MVC — /api/geschichten", "CRUD for publishable stories that link persons and documents. Requires BLOG_WRITE permission for write operations.")
|
||||
Component(geschSvc, "GeschichteService", "Spring Service", "Manages story lifecycle (DRAFT → PUBLISHED with timestamp). Sanitizes HTML body with an allowlist policy.")
|
||||
Component(exHandler, "GlobalExceptionHandler", "Spring @RestControllerAdvice", "Converts DomainException, validation errors, and generic exceptions to ErrorResponse JSON with machine-readable ErrorCode and HTTP status.")
|
||||
}
|
||||
|
||||
Component(documentSvc, "DocumentService", "Spring Service", "See diagram 3b. Called by DashboardService to fetch document titles and resume data.")
|
||||
Component(transcriptionSvc, "TranscriptionService", "Spring Service", "See diagram 3c. Called by DashboardService to fetch transcription block progress for resume.")
|
||||
|
||||
Rel(frontend, dashCtrl, "Dashboard requests", "HTTP / JSON")
|
||||
Rel(frontend, statsCtrl, "GET /api/stats", "HTTP / JSON")
|
||||
Rel(frontend, notifCtrl, "Notification stream and history", "HTTP / JSON / SSE")
|
||||
Rel(frontend, geschCtrl, "Story requests", "HTTP / JSON")
|
||||
Rel(dashCtrl, dashSvc, "Delegates to")
|
||||
Rel(statsCtrl, statsSvc, "Delegates to")
|
||||
Rel(statsSvc, db, "Reads aggregate counts", "JDBC")
|
||||
Rel(dashSvc, auditQuery, "Fetches activity feed and pulse stats")
|
||||
Rel(dashSvc, documentSvc, "Fetches document titles and resume data")
|
||||
Rel(dashSvc, transcriptionSvc, "Fetches transcription block progress for resume")
|
||||
Rel(notifCtrl, notifSvc, "Delegates to")
|
||||
Rel(notifCtrl, sseRegistry, "Registers client SSE connection")
|
||||
Rel(notifSvc, sseRegistry, "Broadcasts events to connected clients")
|
||||
Rel(geschCtrl, geschSvc, "Delegates to")
|
||||
Rel(auditSvc, db, "Writes audit_log", "JDBC")
|
||||
Rel(auditQuery, db, "Reads audit_log", "JDBC")
|
||||
Rel(notifSvc, db, "Reads / writes notifications", "JDBC")
|
||||
Rel(geschSvc, db, "Reads / writes geschichten", "JDBC")
|
||||
|
||||
@enduml
|
||||
29
docs/architecture/c4/l3-frontend-3a-middleware-auth.puml
Normal file
29
docs/architecture/c4/l3-frontend-3a-middleware-auth.puml
Normal file
@@ -0,0 +1,29 @@
|
||||
@startuml
|
||||
!include <C4/C4_Component>
|
||||
|
||||
title Component Diagram: Web Frontend — Middleware, Auth & Layout
|
||||
|
||||
Person(user, "User")
|
||||
Container(backend, "API Backend", "Spring Boot")
|
||||
|
||||
System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
|
||||
Component(hooks, "hooks.server.ts", "SvelteKit Server Hook", "Four handle layers: (1) handleAuth — redirects unauthenticated users to /login; (2) userGroup — reads auth_token cookie, fetches /api/users/me, stores user in event.locals; (3) handleFetch — injects Authorization header on all outgoing /api/ calls; (4) handleLocaleDetection — sets language cookie from Accept-Language header.")
|
||||
Component(i18n, "hooks.ts (Paraglide)", "SvelteKit Client Hook", "Client-side i18n middleware. Detects language from URL and sets the active locale for Paraglide.js translation functions.")
|
||||
Component(layout, "+layout.server.ts", "SvelteKit Layout Loader", "Passes event.locals.user down to all child pages so every route has access to the authenticated user.")
|
||||
Component(loginPage, "/login", "SvelteKit Route", "Form action: encodes email:password as Base64 Basic Auth token, POSTs to /api/users/me to validate, sets auth_token httpOnly cookie (SameSite=strict, maxAge=86400), redirects to /.")
|
||||
Component(logoutPage, "/logout", "SvelteKit Route (server-only)", "Clears the auth_token cookie and redirects to /login.")
|
||||
Component(registerPage, "/register", "SvelteKit Route", "Loader validates invite code via GET /api/auth/invite/{code}. Form action: POST /api/auth/register to create the user account.")
|
||||
Component(forgotPw, "/forgot-password", "SvelteKit Route", "Form action: POST /api/auth/forgot-password. Always responds with success to prevent email enumeration.")
|
||||
Component(resetPw, "/reset-password", "SvelteKit Route", "Form action: POST /api/auth/reset-password with the token from the query string.")
|
||||
}
|
||||
|
||||
Rel(user, hooks, "Every browser request", "HTTPS")
|
||||
Rel(hooks, backend, "GET /api/users/me (session check)", "HTTP / Basic Auth")
|
||||
Rel(hooks, loginPage, "Redirect if no token")
|
||||
Rel(hooks, layout, "Stores authenticated user in event.locals")
|
||||
Rel(loginPage, backend, "POST /api/users/me (auth check)", "HTTP / Basic Auth")
|
||||
Rel(registerPage, backend, "GET /api/auth/invite/{code}, POST /api/auth/register", "HTTP / JSON")
|
||||
Rel(forgotPw, backend, "POST /api/auth/forgot-password", "HTTP / JSON")
|
||||
Rel(resetPw, backend, "POST /api/auth/reset-password", "HTTP / JSON")
|
||||
|
||||
@enduml
|
||||
43
docs/architecture/c4/l3-frontend-3b-document-workflows.puml
Normal file
43
docs/architecture/c4/l3-frontend-3b-document-workflows.puml
Normal file
@@ -0,0 +1,43 @@
@startuml
!include <C4/C4_Component>

title Component Diagram: Web Frontend — Document Workflows

Person(user, "User")
Container(backend, "API Backend", "Spring Boot")

System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
Component(homePage, "/ (Home / Search)", "SvelteKit Route", "Loader: parses URL params (q, from, to, senderId, receiverId, tags), fetches /api/documents/search and /api/persons. Renders search form with full-text, date range, sender/receiver typeahead, and tag filters.")
Component(docsListPageTs, "/documents/+page.ts", "SvelteKit Client Loader", "Client-side load gated by matchMedia('(min-width: 1024px)') and ?view query. Fetches /api/documents/density only on desktop (Tailwind lg breakpoint) and outside calendar view; degrades to empty buckets on network failure.")
Component(timelineFilter, "TimelineDensityFilter.svelte", "Svelte Component", "Per-month density bars above the document list. Click selects a single month, emits onchange({from, to}) using YYYY-MM-DD boundaries. Hidden on mobile and tablet (below lg, 1024px) and in calendar view.")
Component(docDetail, "/documents/[id]", "SvelteKit Route", "Loader: GET /api/documents/{id}. Page: metadata panel, inline file viewer, transcription editor, annotation layer, and comment thread.")
Component(docEdit, "/documents/[id]/edit", "SvelteKit Route", "Edit form with PersonTypeahead, TagInput, date/location fields. Form action: PUT /api/documents/{id}.")
Component(docNew, "/documents/new", "SvelteKit Route", "Upload form for a new document. Loader: GET /api/persons. Form action: POST /api/documents with multipart file.")
Component(docBulkEdit, "/documents/bulk-edit", "SvelteKit Route", "Multi-document metadata editor. Loader: GET /api/documents/incomplete. Requires WRITE_ALL (redirects otherwise). Action: PATCH /api/documents/bulk.")
Component(enrichPage, "/enrich/[id]", "SvelteKit Route", "Guided enrichment workflow. Loader: GET /api/documents/{id}. Progressively saves annotations and transcription blocks.")
Component(apiPersons, "/api/persons (SvelteKit API)", "SvelteKit Server Route", "Proxies GET /api/persons?q=... to backend for PersonTypeahead suggestions.")
Component(apiTags, "/api/tags (SvelteKit API)", "SvelteKit Server Route", "Proxies GET /api/tags to backend for TagInput autocomplete.")
Component(typeahead, "PersonTypeahead.svelte", "Svelte Component", "Async autocomplete for selecting a person. Debounces input, calls /api/persons?q=.")
Component(tagInput, "TagInput.svelte", "Svelte Component", "Multi-tag input. Supports free-text entry and selecting existing tags from /api/tags.")
}

Rel(user, homePage, "Searches and browses", "HTTPS / Browser")
Rel(homePage, backend, "GET /api/documents/search, GET /api/persons", "HTTP / JSON")
Rel(docsListPageTs, backend, "GET /api/documents/density (desktop only, ≥1024px)", "HTTP / JSON")
Rel(homePage, timelineFilter, "Mounts above the result list")
Rel(docsListPageTs, timelineFilter, "Provides density / minDate / maxDate props")
Rel(docDetail, backend, "GET /api/documents/{id}, GET /api/documents/{id}/file", "HTTP / JSON + Binary")
Rel(docEdit, backend, "PUT /api/documents/{id}", "HTTP / Multipart")
Rel(docNew, backend, "GET /api/persons, POST /api/documents", "HTTP / JSON + Multipart")
Rel(docBulkEdit, backend, "GET /api/documents/incomplete, PATCH /api/documents/bulk", "HTTP / JSON")
Rel(enrichPage, backend, "GET/POST /api/transcription, POST /api/documents/{id}/annotations", "HTTP / JSON")
Rel(homePage, typeahead, "Uses for sender/receiver filter")
Rel(docEdit, typeahead, "Uses for sender/receiver selection")
Rel(docNew, typeahead, "Uses for sender selection")
Rel(docEdit, tagInput, "Uses for tag management")
Rel(typeahead, apiPersons, "Fetches suggestions", "HTTP")
Rel(tagInput, apiTags, "Fetches existing tags", "HTTP")
Rel(apiPersons, backend, "GET /api/persons", "HTTP / JSON")
Rel(apiTags, backend, "GET /api/tags", "HTTP / JSON")

@enduml
docs/architecture/c4/l3-frontend-3c-people-stories.puml (new file, 32 additions)
@@ -0,0 +1,32 @@
@startuml
!include <C4/C4_Component>

title Component Diagram: Web Frontend — People, Stories & Discovery

Person(user, "User")
Container(backend, "API Backend", "Spring Boot")

System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
Component(personsPage, "/persons and /persons/[id]", "SvelteKit Routes", "Person directory and detail. Detail: metadata, document list sent/received, correspondents, explicit and inferred family relationships.")
Component(personEdit, "/persons/[id]/edit and /persons/new", "SvelteKit Routes", "Create and edit person forms. Edit: metadata, aliases, explicit relationships. Actions: PUT/POST /api/persons.")
Component(briefwechsel, "/briefwechsel", "SvelteKit Route", "Bilateral conversation timeline. Selects two persons via PersonTypeahead, fetches GET /api/documents/conversation, displays chronological exchange.")
Component(aktivitaeten, "/aktivitaeten", "SvelteKit Route", "Unified activity feed (Chronik). Loader: GET /api/dashboard/activity and GET /api/notifications?read=false.")
Component(geschichten, "/geschichten and /geschichten/[id]", "SvelteKit Routes", "Story list and detail pages. Loader: GET /api/geschichten?status=PUBLISHED.")
Component(geschichtenEdit, "/geschichten/[id]/edit and /geschichten/new", "SvelteKit Routes", "Story editor with rich text, person and document linking. Actions: PUT/POST /api/geschichten. Requires BLOG_WRITE permission.")
Component(stammbaum, "/stammbaum", "SvelteKit Route", "Family tree visualisation. Loader: GET /api/network (nodes + edges). Renders interactive family tree from network graph data.")
Component(profilePage, "/profile", "SvelteKit Route", "Current user profile settings. Loader: GET /api/users/me/notification-preferences. Actions: update name/password and notification preferences.")
Component(userProfile, "/users/[id]", "SvelteKit Route", "Public user profile view. Loader: GET /api/users/{id}.")
}

Rel(user, personsPage, "Browses family members", "HTTPS / Browser")
Rel(personsPage, backend, "GET /api/persons, GET /api/persons/{id}", "HTTP / JSON")
Rel(personEdit, backend, "GET /api/persons/{id}, PUT /api/persons/{id}, POST /api/persons", "HTTP / JSON")
Rel(briefwechsel, backend, "GET /api/documents/conversation", "HTTP / JSON")
Rel(aktivitaeten, backend, "GET /api/dashboard/activity, GET /api/notifications", "HTTP / JSON")
Rel(geschichten, backend, "GET /api/geschichten", "HTTP / JSON")
Rel(geschichtenEdit, backend, "GET/PUT/POST /api/geschichten", "HTTP / JSON")
Rel(stammbaum, backend, "GET /api/network", "HTTP / JSON")
Rel(profilePage, backend, "GET/PUT /api/users/me, notification-preferences", "HTTP / JSON")
Rel(userProfile, backend, "GET /api/users/{id}", "HTTP / JSON")

@enduml
docs/architecture/c4/l3-frontend-3d-administration.puml (new file, 27 additions)
@@ -0,0 +1,27 @@
@startuml
!include <C4/C4_Component>

title Component Diagram: Web Frontend — Administration & Help

Person(admin, "Administrator")
Person(user, "User")
Container(backend, "API Backend", "Spring Boot")

System_Boundary(frontend, "Web Frontend (SvelteKit / SSR)") {
Component(adminUsers, "/admin/users, /admin/users/[id], /admin/users/new, /admin/invites", "SvelteKit Routes", "User directory, create/update/delete users, and manage invite codes. Requires ADMIN_USER permission.")
Component(adminGroups, "/admin/groups, /admin/groups/[id], /admin/groups/new", "SvelteKit Routes", "Permission group management: create/edit groups and their permission sets.")
Component(adminTags, "/admin/tags and /admin/tags/[id]", "SvelteKit Routes", "Tag administration: edit tag hierarchy, merge tags, delete subtrees.")
Component(adminOcr, "/admin/ocr and /admin/ocr/[personId]", "SvelteKit Routes", "Global and per-person OCR configuration. Manages script types and triggers sender model training.")
Component(adminSystem, "/admin/system", "SvelteKit Route", "System status panel. Triggers Excel/ODS mass import (POST /api/admin/trigger-import). Displays import state.")
Component(hilfe, "/hilfe/transkription", "SvelteKit Route", "Static transcription style guide for Kurrent and Sütterlin character recognition. No backend calls.")
}

Rel(admin, adminUsers, "Manages users and invites", "HTTPS / Browser")
Rel(user, hilfe, "Views transcription style guide", "HTTPS / Browser")
Rel(adminUsers, backend, "GET/POST/DELETE /api/users, POST /api/auth/invite", "HTTP / JSON")
Rel(adminGroups, backend, "GET/POST/PUT/DELETE /api/groups", "HTTP / JSON")
Rel(adminTags, backend, "GET/PUT/DELETE /api/tags", "HTTP / JSON")
Rel(adminOcr, backend, "GET/POST /api/ocr (global config and sender training)", "HTTP / JSON")
Rel(adminSystem, backend, "POST /api/admin/trigger-import, GET /api/admin/import-status", "HTTP / JSON")

@enduml
Some files were not shown because too many files have changed in this diff. Show More
Reference in New Issue
Block a user