feat(search): add applyOffsets utility and regenerate API types with MatchOffset/SearchMatchData
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
@@ -628,6 +628,22 @@ export interface paths {
patch: operations["editComment"];
trace?: never;
};
"/api/documents/{documentId}/annotations/{annotationId}": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get?: never;
put?: never;
post?: never;
delete: operations["deleteAnnotation"];
options?: never;
head?: never;
patch: operations["updateAnnotation"];
trace?: never;
};
"/api/users/search": {
parameters: {
query?: never;
@@ -1060,22 +1076,6 @@ export interface paths {
patch?: never;
trace?: never;
};
"/api/documents/{documentId}/annotations/{annotationId}": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get?: never;
put?: never;
post?: never;
delete: operations["deleteAnnotation"];
options?: never;
head?: never;
patch?: never;
trace?: never;
};
}
export type webhooks = Record<string, never>;
export interface components {
@@ -1440,6 +1440,16 @@ export interface components {
label?: string;
enrolled?: boolean;
};
UpdateAnnotationDTO: {
/** Format: double */
x?: number;
/** Format: double */
y?: number;
/** Format: double */
width?: number;
/** Format: double */
height?: number;
};
StatsDTO: {
/** Format: int64 */
totalPersons?: number;
@@ -1451,17 +1461,17 @@ export interface components {
/** Format: uuid */
id?: string;
displayName?: string;
personType?: string;
firstName?: string;
lastName?: string;
/** Format: int64 */
documentCount?: number;
/** Format: int32 */
birthYear?: number;
/** Format: int32 */
deathYear?: number;
alias?: string;
notes?: string;
/** Format: int64 */
documentCount?: number;
personType?: string;
};
TrainingInfoResponse: {
/** Format: int32 */
@@ -1508,6 +1518,8 @@ export interface components {
/** Format: int64 */
totalElements?: number;
pageable?: components["schemas"]["PageableObject"];
first?: boolean;
last?: boolean;
/** Format: int32 */
size?: number;
content?: components["schemas"]["NotificationDTO"][];
@@ -1516,8 +1528,6 @@ export interface components {
sort?: components["schemas"]["SortObject"];
/** Format: int32 */
numberOfElements?: number;
first?: boolean;
last?: boolean;
empty?: boolean;
};
PageableObject: {
@@ -1581,6 +1591,22 @@ export interface components {
documents?: components["schemas"]["Document"][];
/** Format: int64 */
total?: number;
matchData: {
[key: string]: components["schemas"]["SearchMatchData"];
};
};
MatchOffset: {
/** Format: int32 */
start: number;
/** Format: int32 */
length: number;
};
SearchMatchData: {
transcriptionSnippet?: string;
titleOffsets: components["schemas"]["MatchOffset"][];
senderMatched: boolean;
matchedReceiverIds: string[];
matchedTagIds: string[];
};
IncompleteDocumentDTO: {
/** Format: uuid */
@@ -2938,8 +2964,8 @@ export interface operations {
};
};
responses: {
/** @description OK */
200: {
/** @description No Content */
204: {
headers: {
[name: string]: unknown;
};
@@ -2995,6 +3021,54 @@ export interface operations {
};
};
};
deleteAnnotation: {
parameters: {
query?: never;
header?: never;
path: {
documentId: string;
annotationId: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
/** @description No Content */
204: {
headers: {
[name: string]: unknown;
};
content?: never;
};
};
};
updateAnnotation: {
parameters: {
query?: never;
header?: never;
path: {
documentId: string;
annotationId: string;
};
cookie?: never;
};
requestBody: {
content: {
"application/json": components["schemas"]["UpdateAnnotationDTO"];
};
};
responses: {
/** @description OK */
200: {
headers: {
[name: string]: unknown;
};
content: {
"*/*": components["schemas"]["DocumentAnnotation"];
};
};
};
};
search: {
parameters: {
query?: {
@@ -3425,7 +3499,7 @@ export interface operations {
/** @description Filter by document status */
status?: "PLACEHOLDER" | "UPLOADED" | "TRANSCRIBED" | "REVIEWED" | "ARCHIVED";
/** @description Sort field */
sort?: "DATE" | "TITLE" | "SENDER" | "RECEIVER" | "UPLOAD_DATE";
sort?: "DATE" | "TITLE" | "SENDER" | "RECEIVER" | "UPLOAD_DATE" | "RELEVANCE";
/** @description Sort direction: ASC or DESC */
dir?: string;
};
@@ -3602,25 +3676,4 @@ export interface operations {
};
};
};
deleteAnnotation: {
parameters: {
query?: never;
header?: never;
path: {
documentId: string;
annotationId: string;
};
cookie?: never;
};
requestBody?: never;
responses: {
/** @description No Content */
204: {
headers: {
[name: string]: unknown;
};
content?: never;
};
};
};
}
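
For context, a rough sketch of how the regenerated MatchOffset/SearchMatchData schemas and the matchData map are meant to fit together with the new utility on the frontend (not part of this commit; the import paths, the SvelteKit-style $lib alias, and the helper name are illustrative assumptions):

import type { components } from '$lib/api/types'; // assumed path to the regenerated OpenAPI types
import { applyOffsets } from '$lib/search';

type SearchMatchData = components['schemas']['SearchMatchData'];

// The search response carries matchData keyed by document id; titleOffsets point
// into the document title and can be passed straight to applyOffsets.
function highlightedTitle(title: string, match?: SearchMatchData) {
  return applyOffsets(title, match?.titleOffsets ?? []);
}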

frontend/src/lib/search.spec.ts (new file, 95 lines)
@@ -0,0 +1,95 @@
import { describe, expect, it } from 'vitest';
import { applyOffsets } from './search';

describe('applyOffsets', () => {
  it('returns single plain segment when offsets is empty', () => {
    expect(applyOffsets('Hallo Welt', [])).toEqual([{ text: 'Hallo Welt', highlight: false }]);
  });

  it('highlights a single term at the start', () => {
    expect(applyOffsets('Brief an Anna', [{ start: 0, length: 5 }])).toEqual([
      { text: 'Brief', highlight: true },
      { text: ' an Anna', highlight: false }
    ]);
  });

  it('highlights a term in the middle', () => {
    expect(applyOffsets('Der Brief von Anna', [{ start: 4, length: 5 }])).toEqual([
      { text: 'Der ', highlight: false },
      { text: 'Brief', highlight: true },
      { text: ' von Anna', highlight: false }
    ]);
  });

  it('highlights a term at the end', () => {
    expect(applyOffsets('Brief an Anna', [{ start: 9, length: 4 }])).toEqual([
      { text: 'Brief an ', highlight: false },
      { text: 'Anna', highlight: true }
    ]);
  });

  it('handles two non-overlapping offsets in order', () => {
    expect(
      applyOffsets('Anna und Brief', [
        { start: 0, length: 4 },
        { start: 9, length: 5 }
      ])
    ).toEqual([
      { text: 'Anna', highlight: true },
      { text: ' und ', highlight: false },
      { text: 'Brief', highlight: true }
    ]);
  });

  it('merges overlapping offsets into the longest span', () => {
    // [0,7) and [3,9) overlap → merged [0,max(7,9)) = [0,9) = "Hello wor"
    expect(
      applyOffsets('Hello world', [
        { start: 0, length: 7 },
        { start: 3, length: 6 }
      ])
    ).toEqual([
      { text: 'Hello wor', highlight: true },
      { text: 'ld', highlight: false }
    ]);
  });

  it('merges adjacent (touching) offsets', () => {
    // [0,3) and [3,6) are adjacent → merged [0,6)
    expect(
      applyOffsets('Hallo Welt', [
        { start: 0, length: 3 },
        { start: 3, length: 3 }
      ])
    ).toEqual([
      { text: 'Hallo ', highlight: true },
      { text: 'Welt', highlight: false }
    ]);
  });

  it('clamps offset that extends beyond text length', () => {
    expect(applyOffsets('Hi', [{ start: 0, length: 100 }])).toEqual([
      { text: 'Hi', highlight: true }
    ]);
  });

  it('ignores a completely out-of-bounds offset', () => {
    expect(applyOffsets('Hi', [{ start: 10, length: 5 }])).toEqual([
      { text: 'Hi', highlight: false }
    ]);
  });

  it('sorts unsorted offsets correctly', () => {
    // Offsets provided in reverse order: second term first
    expect(
      applyOffsets('Anna und Brief', [
        { start: 9, length: 5 },
        { start: 0, length: 4 }
      ])
    ).toEqual([
      { text: 'Anna', highlight: true },
      { text: ' und ', highlight: false },
      { text: 'Brief', highlight: true }
    ]);
  });
});

frontend/src/lib/search.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
export type TextSegment = { text: string; highlight: boolean };

export type MatchOffset = { start: number; length: number };

/**
 * Converts a flat string and a list of character-level highlight offsets into
 * an array of text segments that can be rendered without {@html}.
 *
 * Offsets are sorted and merged (overlapping spans become the longest enclosing
 * span) before processing. Out-of-bounds offsets are clamped or dropped.
 *
 * @param text The display text (no delimiter characters).
 * @param offsets Character offsets produced by the backend (Java char positions,
 *   compatible with JavaScript String indexing).
 */
export function applyOffsets(text: string, offsets: MatchOffset[]): TextSegment[] {
  if (!offsets.length) return [{ text, highlight: false }];

  // Sort by start position and merge overlapping / adjacent spans
  const sorted = [...offsets].sort((a, b) => a.start - b.start);
  const merged: { start: number; end: number }[] = [];
  for (const { start, length } of sorted) {
    const end = start + length;
    if (end <= 0 || start >= text.length) continue; // completely out of bounds
    const clampedStart = Math.max(0, start);
    const clampedEnd = Math.min(text.length, end);
    const last = merged[merged.length - 1];
    if (!last || clampedStart > last.end) {
      merged.push({ start: clampedStart, end: clampedEnd });
    } else {
      last.end = Math.max(last.end, clampedEnd);
    }
  }

  if (!merged.length) return [{ text, highlight: false }];

  const segments: TextSegment[] = [];
  let pos = 0;
  for (const { start, end } of merged) {
    if (pos < start) segments.push({ text: text.slice(pos, start), highlight: false });
    segments.push({ text: text.slice(start, end), highlight: true });
    pos = end;
  }
  if (pos < text.length) segments.push({ text: text.slice(pos), highlight: false });
  return segments;
}
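
A rough sketch of how the resulting segments might be rendered without injecting HTML strings (not part of this commit; the helper name and the use of <mark> are illustrative assumptions — in a Svelte component the same mapping would typically be an {#each} block instead of direct DOM calls):

import { applyOffsets, type TextSegment } from './search';

// Hypothetical helper: turns highlight segments into DOM nodes, wrapping
// highlighted runs in <mark> and leaving everything else as plain text nodes.
function segmentsToFragment(segments: TextSegment[]): DocumentFragment {
  const fragment = document.createDocumentFragment();
  for (const { text, highlight } of segments) {
    if (highlight) {
      const mark = document.createElement('mark');
      mark.textContent = text;
      fragment.appendChild(mark);
    } else {
      fragment.appendChild(document.createTextNode(text));
    }
  }
  return fragment;
}

// Example: highlight the matched characters of one title.
const titleElement = document.createElement('h2');
titleElement.replaceChildren(
  segmentsToFragment(applyOffsets('Brief an Anna', [{ start: 0, length: 5 }]))
);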