diff --git a/packages/docx-core/scripts/validate_primitives_openspec_coverage.mjs b/packages/docx-core/scripts/validate_primitives_openspec_coverage.mjs
index 28a03d1..69390e1 100644
--- a/packages/docx-core/scripts/validate_primitives_openspec_coverage.mjs
+++ b/packages/docx-core/scripts/validate_primitives_openspec_coverage.mjs
@@ -9,11 +9,11 @@ const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const PACKAGE_ROOT = path.resolve(__dirname, '..');
const REPO_ROOT = path.resolve(PACKAGE_ROOT, '..', '..');
-const TEST_ROOT = path.join(PACKAGE_ROOT, 'test-primitives');
+const TEST_ROOT = path.join(PACKAGE_ROOT, 'src', 'primitives');
const SRC_ROOT = path.join(PACKAGE_ROOT, 'src', 'primitives');
const CANONICAL_SPEC = path.join(REPO_ROOT, 'openspec', 'specs', 'docx-primitives', 'spec.md');
const CHANGES_ROOT = path.join(REPO_ROOT, 'openspec', 'changes');
-const DEFAULT_MATRIX_PATH = path.join(TEST_ROOT, 'DOCX_PRIMITIVES_OPENSPEC_TRACEABILITY.md');
+const DEFAULT_MATRIX_PATH = path.join(PACKAGE_ROOT, 'docs', 'DOCX_PRIMITIVES_OPENSPEC_TRACEABILITY.md');
function isTraceabilityTestFile(filePath) {
return filePath.endsWith('.test.ts');
diff --git a/packages/docx-core/src/openspec.traceability.test.ts b/packages/docx-core/src/openspec.traceability.test.ts
index 477c1ff..18b4ccf 100644
--- a/packages/docx-core/src/openspec.traceability.test.ts
+++ b/packages/docx-core/src/openspec.traceability.test.ts
@@ -29,7 +29,7 @@ import {
detectContinuationPattern,
processNumberedParagraph,
} from './numbering.js';
-import { testAllure } from './testing/allure-test.js';
+import { testAllure, allureStep } from './testing/allure-test.js';
import { assertDefined } from './testing/test-utils.js';
import { el } from './testing/dom-test-helpers.js';
import { childElements, getLeafText } from './primitives/index.js';
@@ -86,831 +86,1088 @@ describe('OpenSpec traceability: docx-comparison', () => {
// Correlation status enumeration
humanReadableTest.openspec('Status assigned during comparison')(
'Scenario: Status assigned during comparison',
- () => {
- const original = [makeTextAtom('hello')];
- const revised = [makeTextAtom('hello')];
+ async () => {
+ const { original, revised } = await allureStep('Given matching original and revised atoms', () => {
+ const original = [makeTextAtom('hello')];
+ const revised = [makeTextAtom('hello')];
+ return { original, revised };
+ });
- markCorrelationStatus(original, revised, {
- matches: [{ originalIndex: 0, revisedIndex: 0 }],
- deletedIndices: [],
- insertedIndices: [],
+ await allureStep('When correlation status is marked with a match', () => {
+ markCorrelationStatus(original, revised, {
+ matches: [{ originalIndex: 0, revisedIndex: 0 }],
+ deletedIndices: [],
+ insertedIndices: [],
+ });
});
- expect(revised[0]!.correlationStatus).toBe(CorrelationStatus.Equal);
+ await allureStep('Then the revised atom is marked as Equal', () => {
+ expect(revised[0]!.correlationStatus).toBe(CorrelationStatus.Equal);
+ });
},
);
humanReadableTest.openspec('Status for unmatched atoms')(
'Scenario: Status for unmatched atoms',
- () => {
- const original = [makeTextAtom('old')];
- const revised = [makeTextAtom('new')];
+ async () => {
+ const { original, revised } = await allureStep('Given non-matching original and revised atoms', () => {
+ return { original: [makeTextAtom('old')], revised: [makeTextAtom('new')] };
+ });
- markCorrelationStatus(original, revised, {
- matches: [],
- deletedIndices: [0],
- insertedIndices: [0],
+ await allureStep('When correlation status is marked with no matches', () => {
+ markCorrelationStatus(original, revised, {
+ matches: [],
+ deletedIndices: [0],
+ insertedIndices: [0],
+ });
});
- expect(revised[0]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ await allureStep('Then the revised atom is marked as Inserted', () => {
+ expect(revised[0]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ });
},
);
humanReadableTest.openspec('Status for deleted content')(
'Scenario: Status for deleted content',
- () => {
- const original = [makeTextAtom('old')];
- const revised = [makeTextAtom('new')];
+ async () => {
+ const { original, revised } = await allureStep('Given non-matching original and revised atoms', () => {
+ return { original: [makeTextAtom('old')], revised: [makeTextAtom('new')] };
+ });
- markCorrelationStatus(original, revised, {
- matches: [],
- deletedIndices: [0],
- insertedIndices: [0],
+ await allureStep('When correlation status is marked with deletions', () => {
+ markCorrelationStatus(original, revised, {
+ matches: [],
+ deletedIndices: [0],
+ insertedIndices: [0],
+ });
});
- expect(original[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
+ await allureStep('Then the original atom is marked as Deleted', () => {
+ expect(original[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
+ });
},
);
humanReadableTest.openspec('Status for moved source content')(
'Scenario: Status for moved source content',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with deleted and inserted similar text', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('bridge', CorrelationStatus.Equal),
makeTextAtom('The quick brown fox jumps', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.6,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection is applied', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.6,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
+ await allureStep('Then the deleted atom is marked as MovedSource', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
+ });
},
);
humanReadableTest.openspec('Status for moved destination content')(
'Scenario: Status for moved destination content',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with deleted and inserted similar text', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('bridge', CorrelationStatus.Equal),
makeTextAtom('The quick brown fox jumps', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.6,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection is applied', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.6,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ await allureStep('Then the inserted atom is marked as MovedDestination', () => {
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ });
},
);
humanReadableTest.openspec('Status for format-changed content')(
'Scenario: Status for format-changed content',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
- const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- after.comparisonUnitAtomBefore = before;
+ async () => {
+ const after = await allureStep('Given an atom with bold added compared to its before state', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
+ const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ after.comparisonUnitAtomBefore = before;
+ return after;
+ });
- detectFormatChangesInAtomList([after]);
+ await allureStep('When format change detection is applied', () => {
+ detectFormatChangesInAtomList([after]);
+ });
- expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
+ await allureStep('Then the atom is marked as FormatChanged', () => {
+ expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
+ });
},
);
// XML element / part / hash
humanReadableTest.openspec('Element with text content')(
'Scenario: Element with text content',
- () => {
- const element = el('w:t', {}, undefined, 'Hello World');
- expect(element.tagName).toBe('w:t');
- expect(getLeafText(element)).toBe('Hello World');
+ async () => {
+ const element = await allureStep('Given a w:t element with text content', () => {
+ return el('w:t', {}, undefined, 'Hello World');
+ });
+
+ await allureStep('Then the element has correct tag and text', () => {
+ expect(element.tagName).toBe('w:t');
+ expect(getLeafText(element)).toBe('Hello World');
+ });
},
);
humanReadableTest.openspec('Element with attributes')(
'Scenario: Element with attributes',
- () => {
- const element = el('w:p', { 'pt14:Unid': 'abc123' });
- expect(element.getAttribute('pt14:Unid')).toBe('abc123');
+ async () => {
+ const element = await allureStep('Given a w:p element with a Unid attribute', () => {
+ return el('w:p', { 'pt14:Unid': 'abc123' });
+ });
+
+ await allureStep('Then the attribute value is accessible', () => {
+ expect(element.getAttribute('pt14:Unid')).toBe('abc123');
+ });
},
);
humanReadableTest.openspec('Part from main document')(
'Scenario: Part from main document',
- () => {
- expect(PART.uri).toBe('word/document.xml');
+ async () => {
+ await allureStep('Then the part URI points to the main document', () => {
+ expect(PART.uri).toBe('word/document.xml');
+ });
},
);
humanReadableTest.openspec('Hash calculation for content identity')(
'Scenario: Hash calculation for content identity',
- () => {
- const atom = createComparisonUnitAtom({
- contentElement: el('w:t', {}, undefined, 'hash me'),
- ancestors: [],
- part: PART,
+ async () => {
+ const atom = await allureStep('Given an atom created from text content', () => {
+ return createComparisonUnitAtom({
+ contentElement: el('w:t', {}, undefined, 'hash me'),
+ ancestors: [],
+ part: PART,
+ });
});
- expect(atom.sha1Hash).toHaveLength(40);
+ await allureStep('Then the SHA1 hash is 40 characters long', () => {
+ expect(atom.sha1Hash).toHaveLength(40);
+ });
},
);
// ComparisonUnitAtom interface scenarios
humanReadableTest.openspec('Atom from inserted revision')(
'Scenario: Atom from inserted revision',
- () => {
- const ins = el('w:ins', { 'w:id': '1' });
- const atom = createComparisonUnitAtom({
- contentElement: el('w:t', {}, undefined, 'new'),
- ancestors: [ins],
- part: PART,
+ async () => {
+ const atom = await allureStep('Given an atom created inside a w:ins ancestor', () => {
+ const ins = el('w:ins', { 'w:id': '1' });
+ return createComparisonUnitAtom({
+ contentElement: el('w:t', {}, undefined, 'new'),
+ ancestors: [ins],
+ part: PART,
+ });
});
- expect(atom.correlationStatus).toBe(CorrelationStatus.Inserted);
- expect(atom.revTrackElement?.tagName).toBe('w:ins');
+ await allureStep('Then the atom is marked Inserted with w:ins revision tracking', () => {
+ expect(atom.correlationStatus).toBe(CorrelationStatus.Inserted);
+ expect(atom.revTrackElement?.tagName).toBe('w:ins');
+ });
},
);
humanReadableTest.openspec('Atom from deleted revision')(
'Scenario: Atom from deleted revision',
- () => {
- const del = el('w:del', { 'w:id': '1' });
- const atom = createComparisonUnitAtom({
- contentElement: el('w:delText', {}, undefined, 'old'),
- ancestors: [del],
- part: PART,
+ async () => {
+ const atom = await allureStep('Given an atom created inside a w:del ancestor', () => {
+ const del = el('w:del', { 'w:id': '1' });
+ return createComparisonUnitAtom({
+ contentElement: el('w:delText', {}, undefined, 'old'),
+ ancestors: [del],
+ part: PART,
+ });
});
- expect(atom.correlationStatus).toBe(CorrelationStatus.Deleted);
- expect(atom.revTrackElement?.tagName).toBe('w:del');
+ await allureStep('Then the atom is marked Deleted with w:del revision tracking', () => {
+ expect(atom.correlationStatus).toBe(CorrelationStatus.Deleted);
+ expect(atom.revTrackElement?.tagName).toBe('w:del');
+ });
},
);
humanReadableTest.openspec('Atom with ancestor tracking')(
'Scenario: Atom with ancestor tracking',
- () => {
- const paragraph = el('w:p');
- const run = el('w:r');
-
- const atom = createComparisonUnitAtom({
- contentElement: el('w:t', {}, undefined, 'nested'),
- ancestors: [paragraph, run],
- part: PART,
+ async () => {
+ const atom = await allureStep('Given an atom created with paragraph and run ancestors', () => {
+ const paragraph = el('w:p');
+ const run = el('w:r');
+ return createComparisonUnitAtom({
+ contentElement: el('w:t', {}, undefined, 'nested'),
+ ancestors: [paragraph, run],
+ part: PART,
+ });
});
- expect(atom.ancestorElements.map((e) => e.tagName)).toEqual(['w:p', 'w:r']);
+ await allureStep('Then the ancestor elements preserve their tag names', () => {
+ expect(atom.ancestorElements.map((e) => e.tagName)).toEqual(['w:p', 'w:r']);
+ });
},
);
humanReadableTest.openspec('Atom marked as moved source')(
'Scenario: Atom marked as moved source',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with similar deleted and inserted text separated by equal content', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('separator', CorrelationStatus.Equal),
makeTextAtom('The quick brown fox jumps', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.6,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection is applied', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.6,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
- expect(atoms[0]!.moveGroupId).toBeDefined();
- expect(atoms[0]!.moveName).toMatch(/^move/);
+ await allureStep('Then the source atom has MovedSource status with move metadata', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
+ expect(atoms[0]!.moveGroupId).toBeDefined();
+ expect(atoms[0]!.moveName).toMatch(/^move/);
+ });
},
);
humanReadableTest.openspec('Atom marked as moved destination')(
'Scenario: Atom marked as moved destination',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with similar deleted and inserted text separated by equal content', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('separator', CorrelationStatus.Equal),
makeTextAtom('The quick brown fox jumps', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.6,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection is applied', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.6,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
- expect(atoms[2]!.moveGroupId).toBe(atoms[0]!.moveGroupId);
- expect(atoms[2]!.moveName).toBe(atoms[0]!.moveName);
+ await allureStep('Then the destination atom shares move metadata with the source', () => {
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ expect(atoms[2]!.moveGroupId).toBe(atoms[0]!.moveGroupId);
+ expect(atoms[2]!.moveName).toBe(atoms[0]!.moveName);
+ });
},
);
humanReadableTest.openspec('Atom marked as format-changed')(
'Scenario: Atom marked as format-changed',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
- const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- after.comparisonUnitAtomBefore = before;
+ async () => {
+ const after = await allureStep('Given an atom with bold added compared to its before state', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
+ const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ after.comparisonUnitAtomBefore = before;
+ return after;
+ });
- detectFormatChangesInAtomList([after]);
+ await allureStep('When format change detection is applied', () => {
+ detectFormatChangesInAtomList([after]);
+ });
- expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
- expect(after.formatChange?.oldRunProperties).toBeDefined();
- expect(after.formatChange?.newRunProperties).toBeDefined();
- expect(after.formatChange?.changedProperties).toContain('bold');
+ await allureStep('Then the atom has FormatChanged status with bold in changed properties', () => {
+ expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
+ expect(after.formatChange?.oldRunProperties).toBeDefined();
+ expect(after.formatChange?.newRunProperties).toBeDefined();
+ expect(after.formatChange?.changedProperties).toContain('bold');
+ });
},
);
// Factory function scenarios
humanReadableTest.openspec('Creating atom with revision detection')(
'Scenario: Creating atom with revision detection',
- () => {
- const ins = el('w:ins', { 'w:id': '1' });
- const atom = createComparisonUnitAtom({
- contentElement: el('w:t', {}, undefined, 'new'),
- ancestors: [ins],
- part: PART,
+ async () => {
+ const atom = await allureStep('Given an atom created with a w:ins revision ancestor', () => {
+ const ins = el('w:ins', { 'w:id': '1' });
+ return createComparisonUnitAtom({
+ contentElement: el('w:t', {}, undefined, 'new'),
+ ancestors: [ins],
+ part: PART,
+ });
});
- expect(atom.correlationStatus).toBe(CorrelationStatus.Inserted);
- expect(atom.revTrackElement?.tagName).toBe('w:ins');
+ await allureStep('Then the atom detects insertion revision context', () => {
+ expect(atom.correlationStatus).toBe(CorrelationStatus.Inserted);
+ expect(atom.revTrackElement?.tagName).toBe('w:ins');
+ });
},
);
humanReadableTest.openspec('Creating atom without revision context')(
'Scenario: Creating atom without revision context',
- () => {
- const atom = createComparisonUnitAtom({
- contentElement: el('w:t', {}, undefined, 'plain'),
- ancestors: [],
- part: PART,
+ async () => {
+ const atom = await allureStep('Given an atom created with no revision ancestors', () => {
+ return createComparisonUnitAtom({
+ contentElement: el('w:t', {}, undefined, 'plain'),
+ ancestors: [],
+ part: PART,
+ });
});
- expect(atom.revTrackElement ?? null).toBeNull();
- expect([
- CorrelationStatus.Unknown,
- CorrelationStatus.Equal,
- ]).toContain(atom.correlationStatus);
+ await allureStep('Then the atom has no revision tracking and Unknown or Equal status', () => {
+ expect(atom.revTrackElement ?? null).toBeNull();
+ expect([
+ CorrelationStatus.Unknown,
+ CorrelationStatus.Equal,
+ ]).toContain(atom.correlationStatus);
+ });
},
);
// Numbering continuation scenarios
humanReadableTest.openspec('Orphan list item renders with parent format')(
'Scenario: Orphan list item renders with parent format',
- () => {
- const state = createNumberingState();
- const level0: ListLevelInfo = { ilvl: 0, start: 1, numFmt: 'decimal', lvlText: '%1.' };
- const level1: ListLevelInfo = { ilvl: 1, start: 4, numFmt: 'decimal', lvlText: '%1.%2' };
+ async () => {
+ const { state, level0, level1 } = await allureStep('Given numbering state and level definitions', () => {
+ const state = createNumberingState();
+ const level0: ListLevelInfo = { ilvl: 0, start: 1, numFmt: 'decimal', lvlText: '%1.' };
+ const level1: ListLevelInfo = { ilvl: 1, start: 4, numFmt: 'decimal', lvlText: '%1.%2' };
+ return { state, level0, level1 };
+ });
- processNumberedParagraph(state, 1, 0, level0); // 1
- processNumberedParagraph(state, 1, 0, level0); // 2
- processNumberedParagraph(state, 1, 0, level0); // 3
- const continuation = processNumberedParagraph(state, 1, 1, level1);
+ const continuation = await allureStep('When three level-0 items are processed then a level-1 orphan', () => {
+ processNumberedParagraph(state, 1, 0, level0);
+ processNumberedParagraph(state, 1, 0, level0);
+ processNumberedParagraph(state, 1, 0, level0);
+ return processNumberedParagraph(state, 1, 1, level1);
+ });
- expect(continuation).toBe(4);
+ await allureStep('Then the orphan continues as item 4', () => {
+ expect(continuation).toBe(4);
+ });
},
);
humanReadableTest.openspec('Proper nested list renders hierarchically')(
'Scenario: Proper nested list renders hierarchically',
- () => {
- const result = detectContinuationPattern(1, 1, [1, 0, 0]);
- expect(result.isContinuation).toBe(false);
- expect(result.effectiveLevel).toBe(1);
+ async () => {
+ const result = await allureStep('Given a nested list item at level 1 with parent at level 0', () => {
+ return detectContinuationPattern(1, 1, [1, 0, 0]);
+ });
+
+ await allureStep('Then it is not a continuation and stays at level 1', () => {
+ expect(result.isContinuation).toBe(false);
+ expect(result.effectiveLevel).toBe(1);
+ });
},
);
humanReadableTest.openspec('Continuation pattern inherits formatting')(
'Scenario: Continuation pattern inherits formatting',
- () => {
- const result = detectContinuationPattern(1, 4, [3, 0, 0]);
- expect(result.isContinuation).toBe(true);
- expect(result.effectiveLevel).toBe(0);
+ async () => {
+ const result = await allureStep('Given a list item with start=4 and 3 items already at level 0', () => {
+ return detectContinuationPattern(1, 4, [3, 0, 0]);
+ });
+
+ await allureStep('Then it is detected as a continuation at effective level 0', () => {
+ expect(result.isContinuation).toBe(true);
+ expect(result.effectiveLevel).toBe(0);
+ });
},
);
// Footnote numbering scenarios
humanReadableTest.openspec('First footnote displays as 1')(
'Scenario: First footnote displays as 1',
- () => {
- const tracker = new FootnoteNumberingTracker(createDocumentWithFootnotes(['2', '5', '3']));
- expect(tracker.getFootnoteDisplayNumber('2')).toBe(1);
+ async () => {
+ const tracker = await allureStep('Given a document with footnote IDs 2, 5, 3', () => {
+ return new FootnoteNumberingTracker(createDocumentWithFootnotes(['2', '5', '3']));
+ });
+
+ await allureStep('Then the first footnote in document order displays as 1', () => {
+ expect(tracker.getFootnoteDisplayNumber('2')).toBe(1);
+ });
},
);
humanReadableTest.openspec('Sequential numbering ignores XML IDs')(
'Scenario: Sequential numbering ignores XML IDs',
- () => {
- const ids = Array.from({ length: 91 }, (_, i) => (i + 2).toString());
- const tracker = new FootnoteNumberingTracker(createDocumentWithFootnotes(ids));
+ async () => {
+ const tracker = await allureStep('Given a document with 91 footnotes (IDs 2..92)', () => {
+ const ids = Array.from({ length: 91 }, (_, i) => (i + 2).toString());
+ return new FootnoteNumberingTracker(createDocumentWithFootnotes(ids));
+ });
- expect(tracker.getFootnoteDisplayNumber('2')).toBe(1);
- expect(tracker.getFootnoteDisplayNumber('92')).toBe(91);
+ await allureStep('Then display numbers are sequential regardless of XML IDs', () => {
+ expect(tracker.getFootnoteDisplayNumber('2')).toBe(1);
+ expect(tracker.getFootnoteDisplayNumber('92')).toBe(91);
+ });
},
);
humanReadableTest.openspec('Reserved footnote IDs excluded from numbering')(
'Scenario: Reserved footnote IDs excluded from numbering',
- () => {
- const tracker = new FootnoteNumberingTracker(createDocumentWithFootnotes(['0', '1', '2', '3']));
- expect(tracker.getFootnoteDisplayNumber('0')).toBeUndefined();
- expect(tracker.getFootnoteDisplayNumber('1')).toBeUndefined();
- expect(tracker.getFootnoteDisplayNumber('2')).toBe(1);
+ async () => {
+ const tracker = await allureStep('Given a document with footnote IDs including reserved 0 and 1', () => {
+ return new FootnoteNumberingTracker(createDocumentWithFootnotes(['0', '1', '2', '3']));
+ });
+
+ await allureStep('Then reserved IDs return undefined and numbering starts from ID 2', () => {
+ expect(tracker.getFootnoteDisplayNumber('0')).toBeUndefined();
+ expect(tracker.getFootnoteDisplayNumber('1')).toBeUndefined();
+ expect(tracker.getFootnoteDisplayNumber('2')).toBe(1);
+ });
},
);
humanReadableTest.openspec('Building footnote mapping')(
'Scenario: Building footnote mapping',
- () => {
- const tracker = new FootnoteNumberingTracker(createDocumentWithFootnotes(['7', '3', '8']));
- expect(tracker.getFootnoteCount()).toBe(3);
- expect(tracker.getFootnoteDisplayNumber('7')).toBe(1);
+ async () => {
+ const tracker = await allureStep('Given a document with footnote IDs 7, 3, 8', () => {
+ return new FootnoteNumberingTracker(createDocumentWithFootnotes(['7', '3', '8']));
+ });
+
+ await allureStep('Then the tracker counts 3 footnotes and first in order displays as 1', () => {
+ expect(tracker.getFootnoteCount()).toBe(3);
+ expect(tracker.getFootnoteDisplayNumber('7')).toBe(1);
+ });
},
);
humanReadableTest.openspec('Custom footnote marks respected')(
'Scenario: Custom footnote marks respected',
- () => {
- const tracker = new FootnoteNumberingTracker(createDocumentWithFootnotes(['2', '3'], new Set(['2'])));
- expect(tracker.getFootnoteDisplayNumber('2')).toBeUndefined();
- expect(tracker.hasFootnoteCustomMark('2')).toBe(true);
- expect(tracker.getFootnoteDisplayNumber('3')).toBe(1);
+ async () => {
+ const tracker = await allureStep('Given a document with footnote 2 having a custom mark', () => {
+ return new FootnoteNumberingTracker(createDocumentWithFootnotes(['2', '3'], new Set(['2'])));
+ });
+
+ await allureStep('Then the custom-marked footnote is excluded from numbering', () => {
+ expect(tracker.getFootnoteDisplayNumber('2')).toBeUndefined();
+ expect(tracker.hasFootnoteCustomMark('2')).toBe(true);
+ expect(tracker.getFootnoteDisplayNumber('3')).toBe(1);
+ });
},
);
// Move detection algorithm scenarios
humanReadableTest.openspec('Move detected between similar blocks')(
'Scenario: Move detected between similar blocks',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given deleted and inserted atoms with similar text', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('middle', CorrelationStatus.Equal),
makeTextAtom('The quick brown fox jumps', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.6,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection is applied with threshold 0.6', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.6,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ await allureStep('Then the atoms are paired as MovedSource and MovedDestination', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ });
},
);
humanReadableTest.openspec('Short blocks ignored')(
'Scenario: Short blocks ignored',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with single-word deleted and inserted text', () => [
makeTextAtom('the', CorrelationStatus.Deleted),
makeTextAtom('bridge', CorrelationStatus.Equal),
makeTextAtom('the', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.1,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection requires minimum 3 words', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.1,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ await allureStep('Then short blocks remain as Deleted and Inserted', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ });
},
);
humanReadableTest.openspec('Below threshold treated as separate changes')(
'Scenario: Below threshold treated as separate changes',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given dissimilar deleted and inserted text', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('bridge', CorrelationStatus.Equal),
makeTextAtom('A slow gray elephant', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.8,
- moveMinimumWordCount: 3,
- caseInsensitiveMove: true,
+ await allureStep('When move detection uses a high similarity threshold of 0.8', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.8,
+ moveMinimumWordCount: 3,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ await allureStep('Then the atoms remain as separate Deleted and Inserted changes', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ });
},
);
// Jaccard scenarios
humanReadableTest.openspec('Identical text returns 1.0')(
'Scenario: Identical text returns 1.0',
- () => {
- expect(jaccardWordSimilarity('hello world', 'hello world')).toBe(1);
+ async () => {
+ await allureStep('Then identical text has Jaccard similarity of 1.0', () => {
+ expect(jaccardWordSimilarity('hello world', 'hello world')).toBe(1);
+ });
},
);
humanReadableTest.openspec('No common words returns 0.0')(
'Scenario: No common words returns 0.0',
- () => {
- expect(jaccardWordSimilarity('hello world', 'foo bar')).toBe(0);
+ async () => {
+ await allureStep('Then completely different text has Jaccard similarity of 0.0', () => {
+ expect(jaccardWordSimilarity('hello world', 'foo bar')).toBe(0);
+ });
},
);
humanReadableTest.openspec('Partial overlap')(
'Scenario: Partial overlap',
- () => {
- const similarity = jaccardWordSimilarity('the quick brown fox', 'the slow brown dog');
- expect(similarity).toBeCloseTo(2 / 6, 5);
+ async () => {
+ const similarity = await allureStep('Given two strings with partial word overlap', () => {
+ return jaccardWordSimilarity('the quick brown fox', 'the slow brown dog');
+ });
+
+ await allureStep('Then the Jaccard similarity equals 2/6', () => {
+ expect(similarity).toBeCloseTo(2 / 6, 5);
+ });
},
);
// Move detection settings
humanReadableTest.openspec('Move detection disabled')(
'Scenario: Move detection disabled',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with similar deleted and inserted text', () => [
makeTextAtom('The quick brown fox', CorrelationStatus.Deleted),
makeTextAtom('bridge', CorrelationStatus.Equal),
makeTextAtom('The quick brown fox jumps', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: false,
- moveSimilarityThreshold: 0.1,
- moveMinimumWordCount: 1,
- caseInsensitiveMove: true,
+ await allureStep('When move detection is disabled', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: false,
+ moveSimilarityThreshold: 0.1,
+ moveMinimumWordCount: 1,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ await allureStep('Then atoms retain their original Deleted and Inserted status', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.Deleted);
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.Inserted);
+ });
},
);
humanReadableTest.openspec('Custom threshold applied')(
'Scenario: Custom threshold applied',
- () => {
- const atoms = [
+ async () => {
+ const atoms = await allureStep('Given atoms with partially overlapping deleted and inserted text', () => [
makeTextAtom('one two three four', CorrelationStatus.Deleted),
makeTextAtom('bridge', CorrelationStatus.Equal),
makeTextAtom('one two five six', CorrelationStatus.Inserted),
- ];
+ ]);
- detectMovesInAtomList(atoms, {
- detectMoves: true,
- moveSimilarityThreshold: 0.3,
- moveMinimumWordCount: 1,
- caseInsensitiveMove: true,
+ await allureStep('When move detection uses a low threshold of 0.3', () => {
+ detectMovesInAtomList(atoms, {
+ detectMoves: true,
+ moveSimilarityThreshold: 0.3,
+ moveMinimumWordCount: 1,
+ caseInsensitiveMove: true,
+ });
});
- expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
- expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ await allureStep('Then partial overlap is sufficient to detect a move', () => {
+ expect(atoms[0]!.correlationStatus).toBe(CorrelationStatus.MovedSource);
+ expect(atoms[2]!.correlationStatus).toBe(CorrelationStatus.MovedDestination);
+ });
},
);
// Move markup generation
humanReadableTest.openspec('Move source markup structure')(
'Scenario: Move source markup structure',
- () => {
- const content: Element[] = [el('w:r')];
- const markup = generateMoveSourceMarkup('move1', content, {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- startId: 1,
+ async () => {
+ const markup = await allureStep('Given move source markup generated for move1', () => {
+ const content: Element[] = [el('w:r')];
+ return generateMoveSourceMarkup('move1', content, {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ startId: 1,
+ });
});
- expect(markup.rangeStart.tagName).toBe('w:moveFromRangeStart');
- expect(markup.moveWrapper.tagName).toBe('w:moveFrom');
- expect(markup.rangeEnd.tagName).toBe('w:moveFromRangeEnd');
- expect(markup.rangeStart.getAttribute('w:name')).toBe('move1');
+ await allureStep('Then the markup has correct moveFrom range and wrapper elements', () => {
+ expect(markup.rangeStart.tagName).toBe('w:moveFromRangeStart');
+ expect(markup.moveWrapper.tagName).toBe('w:moveFrom');
+ expect(markup.rangeEnd.tagName).toBe('w:moveFromRangeEnd');
+ expect(markup.rangeStart.getAttribute('w:name')).toBe('move1');
+ });
},
);
humanReadableTest.openspec('Move destination markup structure')(
'Scenario: Move destination markup structure',
- () => {
- const content: Element[] = [el('w:r')];
- const markup = generateMoveDestinationMarkup('move1', content, {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- startId: 5,
+ async () => {
+ const markup = await allureStep('Given move destination markup generated for move1', () => {
+ const content: Element[] = [el('w:r')];
+ return generateMoveDestinationMarkup('move1', content, {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ startId: 5,
+ });
});
- expect(markup.rangeStart.tagName).toBe('w:moveToRangeStart');
- expect(markup.moveWrapper.tagName).toBe('w:moveTo');
- expect(markup.rangeEnd.tagName).toBe('w:moveToRangeEnd');
- expect(markup.rangeStart.getAttribute('w:name')).toBe('move1');
+ await allureStep('Then the markup has correct moveTo range and wrapper elements', () => {
+ expect(markup.rangeStart.tagName).toBe('w:moveToRangeStart');
+ expect(markup.moveWrapper.tagName).toBe('w:moveTo');
+ expect(markup.rangeEnd.tagName).toBe('w:moveToRangeEnd');
+ expect(markup.rangeStart.getAttribute('w:name')).toBe('move1');
+ });
},
);
humanReadableTest.openspec('Range IDs properly paired')(
'Scenario: Range IDs properly paired',
- () => {
- const source = generateMoveSourceMarkup('move2', [], {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- startId: 11,
- });
- const destination = generateMoveDestinationMarkup('move2', [], {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- startId: 21,
+ async () => {
+ const { source, destination } = await allureStep('Given source and destination markup for move2', () => {
+ const source = generateMoveSourceMarkup('move2', [], {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ startId: 11,
+ });
+ const destination = generateMoveDestinationMarkup('move2', [], {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ startId: 21,
+ });
+ return { source, destination };
});
- expect(source.rangeStart.getAttribute('w:id')).toBe(source.rangeEnd.getAttribute('w:id'));
- expect(destination.rangeStart.getAttribute('w:id')).toBe(destination.rangeEnd.getAttribute('w:id'));
+ await allureStep('Then range start and end IDs match within each markup', () => {
+ expect(source.rangeStart.getAttribute('w:id')).toBe(source.rangeEnd.getAttribute('w:id'));
+ expect(destination.rangeStart.getAttribute('w:id')).toBe(destination.rangeEnd.getAttribute('w:id'));
+ });
},
);
// Format change info interface
humanReadableTest.openspec('Bold added')(
'Scenario: Bold added',
- () => {
- expect(getChangedPropertyNames(el('w:rPr'), el('w:rPr', {}, [el('w:b')]))).toContain('bold');
+ async () => {
+ await allureStep('Then adding w:b is detected as bold property change', () => {
+ expect(getChangedPropertyNames(el('w:rPr'), el('w:rPr', {}, [el('w:b')]))).toContain('bold');
+ });
},
);
humanReadableTest.openspec('Multiple properties changed')(
'Scenario: Multiple properties changed',
- () => {
- const changed = getChangedPropertyNames(
- el('w:rPr', {}, [el('w:b')]),
- el('w:rPr', {}, [el('w:i'), el('w:u')]),
- );
- expect(changed).toContain('bold');
- expect(changed).toContain('italic');
- expect(changed).toContain('underline');
+ async () => {
+ const changed = await allureStep('Given properties changing from bold to italic+underline', () => {
+ return getChangedPropertyNames(
+ el('w:rPr', {}, [el('w:b')]),
+ el('w:rPr', {}, [el('w:i'), el('w:u')]),
+ );
+ });
+
+ await allureStep('Then all three changed properties are reported', () => {
+ expect(changed).toContain('bold');
+ expect(changed).toContain('italic');
+ expect(changed).toContain('underline');
+ });
},
);
// Format change detection algorithm
humanReadableTest.openspec('Text becomes bold')(
'Scenario: Text becomes bold',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
- const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- after.comparisonUnitAtomBefore = before;
+ async () => {
+ const after = await allureStep('Given an atom that gains bold formatting', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
+ const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ after.comparisonUnitAtomBefore = before;
+ return after;
+ });
- detectFormatChangesInAtomList([after]);
+ await allureStep('When format change detection is applied', () => {
+ detectFormatChangesInAtomList([after]);
+ });
- expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
- expect(after.formatChange?.changedProperties).toContain('bold');
+ await allureStep('Then the atom is FormatChanged with bold in changed properties', () => {
+ expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
+ expect(after.formatChange?.changedProperties).toContain('bold');
+ });
},
);
humanReadableTest.openspec('No format change')(
'Scenario: No format change',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- after.comparisonUnitAtomBefore = before;
+ async () => {
+ const after = await allureStep('Given an atom with identical formatting before and after', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ after.comparisonUnitAtomBefore = before;
+ return after;
+ });
- detectFormatChangesInAtomList([after]);
+ await allureStep('When format change detection is applied', () => {
+ detectFormatChangesInAtomList([after]);
+ });
- expect(after.correlationStatus).toBe(CorrelationStatus.Equal);
- expect(after.formatChange).toBeUndefined();
+ await allureStep('Then the atom stays Equal with no format change', () => {
+ expect(after.correlationStatus).toBe(CorrelationStatus.Equal);
+ expect(after.formatChange).toBeUndefined();
+ });
},
);
humanReadableTest.openspec('Format detection with text change')(
'Scenario: Format detection with text change',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
- const inserted = makeTextAtom('hello changed', CorrelationStatus.Inserted, [el('w:b')]);
- inserted.comparisonUnitAtomBefore = before;
+ async () => {
+ const inserted = await allureStep('Given an inserted atom with different text and formatting', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
+ const inserted = makeTextAtom('hello changed', CorrelationStatus.Inserted, [el('w:b')]);
+ inserted.comparisonUnitAtomBefore = before;
+ return inserted;
+ });
- detectFormatChangesInAtomList([inserted]);
+ await allureStep('When format change detection is applied', () => {
+ detectFormatChangesInAtomList([inserted]);
+ });
- expect(inserted.correlationStatus).toBe(CorrelationStatus.Inserted);
- expect(inserted.formatChange).toBeUndefined();
+ await allureStep('Then text change takes priority and no format change is reported', () => {
+ expect(inserted.correlationStatus).toBe(CorrelationStatus.Inserted);
+ expect(inserted.formatChange).toBeUndefined();
+ });
},
);
// Run property extraction
humanReadableTest.openspec('Run with properties')(
'Scenario: Run with properties',
- () => {
- const atom = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- const rPr = getRunPropertiesFromAtom(atom);
- assertDefined(rPr, 'rPr');
- expect(childElements(rPr).some((child) => child.tagName === 'w:b')).toBe(true);
+ async () => {
+ const atom = await allureStep('Given an atom with bold run properties', () => {
+ return makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ });
+
+ await allureStep('Then the extracted run properties contain w:b', () => {
+ const rPr = getRunPropertiesFromAtom(atom);
+ assertDefined(rPr, 'rPr');
+ expect(childElements(rPr).some((child) => child.tagName === 'w:b')).toBe(true);
+ });
},
);
humanReadableTest.openspec('Run without properties')(
'Scenario: Run without properties',
- () => {
- const atom = makeTextAtom('hello', CorrelationStatus.Equal, null);
- expect(getRunPropertiesFromAtom(atom)).toBeNull();
+ async () => {
+ const atom = await allureStep('Given an atom with no run properties element', () => {
+ return makeTextAtom('hello', CorrelationStatus.Equal, null);
+ });
+
+ await allureStep('Then extracted run properties return null', () => {
+ expect(getRunPropertiesFromAtom(atom)).toBeNull();
+ });
},
);
// Run property normalization
humanReadableTest.openspec('Normalize null properties')(
'Scenario: Normalize null properties',
- () => {
- const normalized = normalizeRunProperties(null);
- expect(normalized.children).toEqual([]);
+ async () => {
+ const normalized = await allureStep('Given null run properties to normalize', () => {
+ return normalizeRunProperties(null);
+ });
+
+ await allureStep('Then the normalized result has empty children', () => {
+ expect(normalized.children).toEqual([]);
+ });
},
);
humanReadableTest.openspec('Remove existing revision tracking')(
'Scenario: Remove existing revision tracking',
- () => {
- const normalized = normalizeRunProperties(el('w:rPr', {}, [
- el('w:b'),
- el('w:rPrChange', { 'w:id': '1' }),
- ]));
+ async () => {
+ const normalized = await allureStep('Given run properties with w:b and w:rPrChange', () => {
+ return normalizeRunProperties(el('w:rPr', {}, [
+ el('w:b'),
+ el('w:rPrChange', { 'w:id': '1' }),
+ ]));
+ });
- expect(normalized.children?.some((child) => child.tagName === 'w:rPrChange')).toBe(false);
- expect(normalized.children?.some((child) => child.tagName === 'w:b')).toBe(true);
+ await allureStep('Then rPrChange is stripped but w:b is preserved', () => {
+ expect(normalized.children?.some((child) => child.tagName === 'w:rPrChange')).toBe(false);
+ expect(normalized.children?.some((child) => child.tagName === 'w:b')).toBe(true);
+ });
},
);
// Run property comparison
humanReadableTest.openspec('Empty properties equal')(
'Scenario: Empty properties equal',
- () => {
- expect(areRunPropertiesEqual(null, el('w:rPr'))).toBe(true);
+ async () => {
+ await allureStep('Then null and empty w:rPr are considered equal', () => {
+ expect(areRunPropertiesEqual(null, el('w:rPr'))).toBe(true);
+ });
},
);
humanReadableTest.openspec('Different properties')(
'Scenario: Different properties',
- () => {
- expect(areRunPropertiesEqual(
- el('w:rPr', {}, [el('w:b')]),
- el('w:rPr', {}, [el('w:i')]),
- )).toBe(false);
+ async () => {
+ await allureStep('Then bold and italic run properties are not equal', () => {
+ expect(areRunPropertiesEqual(
+ el('w:rPr', {}, [el('w:b')]),
+ el('w:rPr', {}, [el('w:i')]),
+ )).toBe(false);
+ });
},
);
humanReadableTest.openspec('Same properties different order')(
'Scenario: Same properties different order',
- () => {
- expect(areRunPropertiesEqual(
- el('w:rPr', {}, [el('w:b'), el('w:i')]),
- el('w:rPr', {}, [el('w:i'), el('w:b')]),
- )).toBe(true);
+ async () => {
+ await allureStep('Then run properties with same elements in different order are equal', () => {
+ expect(areRunPropertiesEqual(
+ el('w:rPr', {}, [el('w:b'), el('w:i')]),
+ el('w:rPr', {}, [el('w:i'), el('w:b')]),
+ )).toBe(true);
+ });
},
);
// Format detection settings
humanReadableTest.openspec('Format detection disabled')(
'Scenario: Format detection disabled',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
- const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- after.comparisonUnitAtomBefore = before;
+ async () => {
+ const after = await allureStep('Given an atom with formatting change and detection disabled', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
+ const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ after.comparisonUnitAtomBefore = before;
+ return after;
+ });
- detectFormatChangesInAtomList([after], { detectFormatChanges: false });
+ await allureStep('When format detection is explicitly disabled', () => {
+ detectFormatChangesInAtomList([after], { detectFormatChanges: false });
+ });
- expect(after.correlationStatus).toBe(CorrelationStatus.Equal);
- expect(after.formatChange).toBeUndefined();
+ await allureStep('Then the atom remains Equal with no format change', () => {
+ expect(after.correlationStatus).toBe(CorrelationStatus.Equal);
+ expect(after.formatChange).toBeUndefined();
+ });
},
);
humanReadableTest.openspec('Format detection enabled by default')(
'Scenario: Format detection enabled by default',
- () => {
- expect(DEFAULT_FORMAT_DETECTION_SETTINGS.detectFormatChanges).toBe(true);
+ async () => {
+ await allureStep('Then the default settings have format detection enabled', () => {
+ expect(DEFAULT_FORMAT_DETECTION_SETTINGS.detectFormatChanges).toBe(true);
+ });
},
);
// OpenXML format change markup generation
humanReadableTest.openspec('Format change markup structure')(
'Scenario: Format change markup structure',
- () => {
- const markup = generateFormatChangeMarkup({
- oldRunProperties: el('w:rPr', {}, [el('w:b')]),
- newRunProperties: el('w:rPr', {}, [el('w:i')]),
- changedProperties: ['bold', 'italic'],
- }, {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- id: 1,
+ async () => {
+ const markup = await allureStep('Given format change markup from bold to italic', () => {
+ return generateFormatChangeMarkup({
+ oldRunProperties: el('w:rPr', {}, [el('w:b')]),
+ newRunProperties: el('w:rPr', {}, [el('w:i')]),
+ changedProperties: ['bold', 'italic'],
+ }, {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ id: 1,
+ });
});
- expect(markup.tagName).toBe('w:rPrChange');
- expect(markup.getAttribute('w:id')).toBe('1');
- expect(markup.getAttribute('w:author')).toBe('Tester');
- expect(markup.getAttribute('w:date')).toBeDefined();
- expect(childElements(markup)[0]?.tagName).toBe('w:rPr');
+ await allureStep('Then the markup has correct rPrChange structure with author and date', () => {
+ expect(markup.tagName).toBe('w:rPrChange');
+ expect(markup.getAttribute('w:id')).toBe('1');
+ expect(markup.getAttribute('w:author')).toBe('Tester');
+ expect(markup.getAttribute('w:date')).toBeDefined();
+ expect(childElements(markup)[0]?.tagName).toBe('w:rPr');
+ });
},
);
humanReadableTest.openspec('Bold added markup')(
'Scenario: Bold added markup',
- () => {
- const run = el('w:r', {}, [
- el('w:rPr', {}, [el('w:b')]),
- el('w:t', {}, undefined, 'text'),
- ]);
-
- const rPrChange = generateFormatChangeMarkup({
- oldRunProperties: el('w:rPr'),
- newRunProperties: el('w:rPr', {}, [el('w:b')]),
- changedProperties: ['bold'],
- }, {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- id: 2,
+ async () => {
+ const run = await allureStep('Given a run with bold and a format change markup for bold addition', () => {
+ const run = el('w:r', {}, [
+ el('w:rPr', {}, [el('w:b')]),
+ el('w:t', {}, undefined, 'text'),
+ ]);
+
+ const rPrChange = generateFormatChangeMarkup({
+ oldRunProperties: el('w:rPr'),
+ newRunProperties: el('w:rPr', {}, [el('w:b')]),
+ changedProperties: ['bold'],
+ }, {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ id: 2,
+ });
+
+ mergeFormatChangeIntoRun(run, rPrChange);
+ return run;
});
- mergeFormatChangeIntoRun(run, rPrChange);
-
- const rPr = childElements(run).find((child) => child.tagName === 'w:rPr');
- assertDefined(rPr, 'rPr');
- const insertedEl = childElements(rPr).find((child) => child.tagName === 'w:rPrChange');
- assertDefined(insertedEl, 'rPrChange');
- const oldRPr = childElements(insertedEl)[0];
- assertDefined(oldRPr, 'oldRPr');
- expect(childElements(oldRPr)).toHaveLength(0);
+ await allureStep('Then the old run properties in rPrChange are empty (bold was added)', () => {
+ const rPr = childElements(run).find((child) => child.tagName === 'w:rPr');
+ assertDefined(rPr, 'rPr');
+ const insertedEl = childElements(rPr).find((child) => child.tagName === 'w:rPrChange');
+ assertDefined(insertedEl, 'rPrChange');
+ const oldRPr = childElements(insertedEl)[0];
+ assertDefined(oldRPr, 'oldRPr');
+ expect(childElements(oldRPr)).toHaveLength(0);
+ });
},
);
humanReadableTest.openspec('Bold removed markup')(
'Scenario: Bold removed markup',
- () => {
- const run = el('w:r', {}, [
- el('w:rPr'),
- el('w:t', {}, undefined, 'text'),
- ]);
-
- const rPrChange = generateFormatChangeMarkup({
- oldRunProperties: el('w:rPr', {}, [el('w:b')]),
- newRunProperties: el('w:rPr'),
- changedProperties: ['bold'],
- }, {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- id: 3,
+ async () => {
+ const run = await allureStep('Given a run without bold and a format change markup for bold removal', () => {
+ const run = el('w:r', {}, [
+ el('w:rPr'),
+ el('w:t', {}, undefined, 'text'),
+ ]);
+
+ const rPrChange = generateFormatChangeMarkup({
+ oldRunProperties: el('w:rPr', {}, [el('w:b')]),
+ newRunProperties: el('w:rPr'),
+ changedProperties: ['bold'],
+ }, {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ id: 3,
+ });
+
+ mergeFormatChangeIntoRun(run, rPrChange);
+ return run;
});
- mergeFormatChangeIntoRun(run, rPrChange);
-
- const rPr = childElements(run).find((child) => child.tagName === 'w:rPr');
- assertDefined(rPr, 'rPr');
- const insertedEl = childElements(rPr).find((child) => child.tagName === 'w:rPrChange');
- assertDefined(insertedEl, 'rPrChange');
- const oldRPr = childElements(insertedEl)[0];
- assertDefined(oldRPr, 'oldRPr');
- expect(childElements(oldRPr).some((child) => child.tagName === 'w:b')).toBe(true);
+ await allureStep('Then the old run properties in rPrChange contain w:b (bold was removed)', () => {
+ const rPr = childElements(run).find((child) => child.tagName === 'w:rPr');
+ assertDefined(rPr, 'rPr');
+ const insertedEl = childElements(rPr).find((child) => child.tagName === 'w:rPrChange');
+ assertDefined(insertedEl, 'rPrChange');
+ const oldRPr = childElements(insertedEl)[0];
+ assertDefined(oldRPr, 'oldRPr');
+ expect(childElements(oldRPr).some((child) => child.tagName === 'w:b')).toBe(true);
+ });
},
);
// Format change revision reporting / property mapping
humanReadableTest.openspec('Get format change revisions')(
'Scenario: Get format change revisions',
- () => {
- const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
- const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
- after.comparisonUnitAtomBefore = before;
-
- detectFormatChangesInAtomList([after]);
- assertDefined(after.formatChange, 'formatChange');
+ async () => {
+ const after = await allureStep('Given an atom with bold added and format change detected', () => {
+ const before = makeTextAtom('hello', CorrelationStatus.Equal, []);
+ const after = makeTextAtom('hello', CorrelationStatus.Equal, [el('w:b')]);
+ after.comparisonUnitAtomBefore = before;
+ detectFormatChangesInAtomList([after]);
+ return after;
+ });
- const markup = generateFormatChangeMarkup(after.formatChange, {
- author: 'Tester',
- dateTime: new Date('2026-01-01T00:00:00.000Z'),
- id: 4,
+ const markup = await allureStep('When format change markup is generated from the detected change', () => {
+ assertDefined(after.formatChange, 'formatChange');
+ return generateFormatChangeMarkup(after.formatChange, {
+ author: 'Tester',
+ dateTime: new Date('2026-01-01T00:00:00.000Z'),
+ id: 4,
+ });
});
- expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
- expect(after.formatChange.changedProperties).toContain('bold');
- expect(markup.getAttribute('w:author')).toBe('Tester');
- expect(markup.getAttribute('w:date')).toBeDefined();
+ await allureStep('Then the revision has correct status, properties, and author', () => {
+ expect(after.correlationStatus).toBe(CorrelationStatus.FormatChanged);
+ expect(after.formatChange!.changedProperties).toContain('bold');
+ expect(markup.getAttribute('w:author')).toBe('Tester');
+ expect(markup.getAttribute('w:date')).toBeDefined();
+ });
},
);
humanReadableTest.openspec('Unknown property name')(
'Scenario: Unknown property name',
- () => {
- const changed = getChangedPropertyNames(
- el('w:rPr'),
- el('w:rPr', {}, [el('w:emboss')]),
- );
- expect(changed.some((name) => name.endsWith('emboss'))).toBe(true);
+ async () => {
+ const changed = await allureStep('Given a run property with unknown element w:emboss', () => {
+ return getChangedPropertyNames(
+ el('w:rPr'),
+ el('w:rPr', {}, [el('w:emboss')]),
+ );
+ });
+
+ await allureStep('Then the changed property name includes emboss', () => {
+ expect(changed.some((name) => name.endsWith('emboss'))).toBe(true);
+ });
},
);
// Additional mapping for explicit footnote parsing API scenario
// (keeps the mapping anchored to concrete exported behavior)
- humanReadableTest.openspec('Building footnote mapping')(
+ humanReadableTest.openspec('Building footnote mapping preserves document order')(
'Scenario: Building footnote mapping preserves document order in references',
- () => {
- const doc = createDocumentWithFootnotes(['9', '3', '5']);
- const refs = findReferencesInOrder(doc, 'w:footnoteReference');
- expect(refs.map((ref) => ref.getAttribute('w:id'))).toEqual(['9', '3', '5']);
+ async () => {
+ const refs = await allureStep('Given a document with footnote references 9, 3, 5', () => {
+ const doc = createDocumentWithFootnotes(['9', '3', '5']);
+ return findReferencesInOrder(doc, 'w:footnoteReference');
+ });
+
+ await allureStep('Then references are returned in document order', () => {
+ expect(refs.map((ref) => ref.getAttribute('w:id'))).toEqual(['9', '3', '5']);
+ });
},
);
});
diff --git a/packages/docx-core/test-primitives/accept_changes.test.ts b/packages/docx-core/src/primitives/accept_changes.test.ts
similarity index 96%
rename from packages/docx-core/test-primitives/accept_changes.test.ts
rename to packages/docx-core/src/primitives/accept_changes.test.ts
index 78fbefd..b427674 100644
--- a/packages/docx-core/test-primitives/accept_changes.test.ts
+++ b/packages/docx-core/src/primitives/accept_changes.test.ts
@@ -1,7 +1,7 @@
import { describe, expect } from 'vitest';
-import { acceptChanges } from '../src/primitives/accept_changes.js';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
+import { acceptChanges } from './accept_changes.js';
+import { parseXml, serializeXml } from './xml.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
const TEST_FEATURE = 'add-accept-tracked-changes';
const it = itAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
diff --git a/packages/docx-core/test-primitives/bookmarks.test.ts b/packages/docx-core/src/primitives/bookmarks.test.ts
similarity index 89%
rename from packages/docx-core/test-primitives/bookmarks.test.ts
rename to packages/docx-core/src/primitives/bookmarks.test.ts
index 803d502..2b368a5 100644
--- a/packages/docx-core/test-primitives/bookmarks.test.ts
+++ b/packages/docx-core/src/primitives/bookmarks.test.ts
@@ -1,8 +1,8 @@
import { describe, expect } from 'vitest';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML } from '../src/primitives/namespaces.js';
-import { getParagraphBookmarkId, insertParagraphBookmarks } from '../src/primitives/bookmarks.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
+import { parseXml } from './xml.js';
+import { OOXML } from './namespaces.js';
+import { getParagraphBookmarkId, insertParagraphBookmarks } from './bookmarks.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
const TEST_FEATURE = 'docx-primitives';
@@ -37,7 +37,7 @@ describe('Traceability: docx-primitives — Paragraph Bookmarks', () => {
await allureStep('Then each paragraph SHALL receive a _bk_* identifier matching the pattern', () => {
const paras = doc.getElementsByTagNameNS(OOXML.W_NS, 'p');
for (let i = 0; i < paras.length; i++) {
- const id = getParagraphBookmarkId(paras[i]);
+ const id = getParagraphBookmarkId(paras[i]!);
expect(id).not.toBeNull();
expect(id).toMatch(/^_bk_[0-9a-f]{12}$/);
}
@@ -52,7 +52,7 @@ describe('Traceability: docx-primitives — Paragraph Bookmarks', () => {
});
const id = await allureStep('When getParagraphBookmarkId is called', async () => {
- const para = doc.getElementsByTagNameNS(OOXML.W_NS, 'p')[0];
+ const para = doc.getElementsByTagNameNS(OOXML.W_NS, 'p')[0]!;
const result = getParagraphBookmarkId(para);
await allureJsonAttachment('Result', { id: result });
return result;
diff --git a/packages/docx-core/src/primitives/comments.delete.test.ts b/packages/docx-core/src/primitives/comments.delete.test.ts
new file mode 100644
index 0000000..5b58b2d
--- /dev/null
+++ b/packages/docx-core/src/primitives/comments.delete.test.ts
@@ -0,0 +1,208 @@
+import { describe, expect } from 'vitest';
+import JSZip from 'jszip';
+import { itAllure, allureStep } from './testing/allure-test.js';
+import { parseXml, serializeXml } from './xml.js';
+import { OOXML, W } from './namespaces.js';
+import { DocxZip } from './zip.js';
+import {
+ addComment,
+ addCommentReply,
+ bootstrapCommentParts,
+ deleteComment,
+ getComments,
+} from './comments.js';
+
+const TEST_FEATURE = 'add-comment-delete-tool';
+const it = itAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
+const W_NS = OOXML.W_NS;
+
+function makeDocXml(bodyXml: string): string {
+ return (
+ `` +
+ `` +
+ `${bodyXml}` +
+ ``
+ );
+}
+
+async function makeDocxBuffer(bodyXml: string): Promise {
+ const zip = new JSZip();
+ zip.file('word/document.xml', makeDocXml(bodyXml));
+ return (await zip.generateAsync({ type: 'nodebuffer' })) as Buffer;
+}
+
+async function loadZip(buffer: Buffer): Promise {
+ return DocxZip.load(buffer);
+}
+
+describe('deleteComment OpenSpec traceability', () => {
+ it
+ .openspec('delete root comment with no replies')
+ ('Scenario: delete root comment with no replies', async () => {
+ const { doc, zip, root } = await allureStep('Given a document with a single root comment', async () => {
+ const zip = await loadZip(await makeDocxBuffer('Hello world'));
+ await bootstrapCommentParts(zip);
+ const doc = parseXml(await zip.readText('word/document.xml'));
+ const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ const root = await addComment(doc, zip, {
+ paragraphEl: paragraph,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root comment',
+ });
+ return { doc, zip, root };
+ });
+
+ await allureStep('When the root comment is deleted', async () => {
+ await deleteComment(doc, zip, { commentId: root.commentId });
+ });
+
+ await allureStep('Then no comments remain and body is intact', async () => {
+ const comments = await getComments(zip, doc);
+ expect(comments).toEqual([]);
+ const serialized = serializeXml(doc);
+ expect(serialized).toContain('');
+ });
+ });
+
+ it
+ .openspec('delete root comment cascade-deletes all descendants')
+ ('Scenario: delete root comment cascade-deletes all descendants', async () => {
+ const { doc, zip, root } = await allureStep('Given a root comment with a child and grandchild reply', async () => {
+ const zip = await loadZip(await makeDocxBuffer('Hello world'));
+ await bootstrapCommentParts(zip);
+ const doc = parseXml(await zip.readText('word/document.xml'));
+ const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ const root = await addComment(doc, zip, {
+ paragraphEl: paragraph,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root comment',
+ });
+ const child = await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Child',
+ text: 'First reply',
+ });
+ await addCommentReply(doc, zip, {
+ parentCommentId: child.commentId,
+ author: 'Grandchild',
+ text: 'Second reply',
+ });
+ return { doc, zip, root };
+ });
+
+ await allureStep('When the root comment is deleted', async () => {
+ await deleteComment(doc, zip, { commentId: root.commentId });
+ });
+
+ await allureStep('Then all comments and replies are removed', async () => {
+ const comments = await getComments(zip, doc);
+ expect(comments).toEqual([]);
+ const commentsXml = await zip.readText('word/comments.xml');
+ expect(commentsXml).not.toContain('Root comment');
+ expect(commentsXml).not.toContain('First reply');
+ expect(commentsXml).not.toContain('Second reply');
+ });
+ });
+
+ it
+ .openspec('delete a leaf reply comment')
+ ('Scenario: delete a leaf reply comment', async () => {
+ const { doc, zip, leaf } = await allureStep('Given a root comment with a single leaf reply', async () => {
+ const zip = await loadZip(await makeDocxBuffer('Hello world'));
+ await bootstrapCommentParts(zip);
+ const doc = parseXml(await zip.readText('word/document.xml'));
+ const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ const root = await addComment(doc, zip, {
+ paragraphEl: paragraph,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root comment',
+ });
+ const leaf = await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Leaf',
+ text: 'Leaf reply',
+ });
+ return { doc, zip, leaf };
+ });
+
+ await allureStep('When the leaf reply is deleted', async () => {
+ await deleteComment(doc, zip, { commentId: leaf.commentId });
+ });
+
+ await allureStep('Then only the root comment remains with no replies', async () => {
+ const comments = await getComments(zip, doc);
+ expect(comments).toHaveLength(1);
+ expect(comments[0]!.text).toBe('Root comment');
+ expect(comments[0]!.replies).toEqual([]);
+ });
+ });
+
+ it
+ .openspec('delete a non-leaf reply cascades to its descendants')
+ ('Scenario: delete a non-leaf reply cascades to its descendants', async () => {
+ const { doc, zip, nonLeafReply } = await allureStep('Given a root comment with a two-level reply chain', async () => {
+ const zip = await loadZip(await makeDocxBuffer('Hello world'));
+ await bootstrapCommentParts(zip);
+ const doc = parseXml(await zip.readText('word/document.xml'));
+ const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ const root = await addComment(doc, zip, {
+ paragraphEl: paragraph,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root comment',
+ });
+ const nonLeafReply = await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Reply',
+ text: 'Reply level 1',
+ });
+ await addCommentReply(doc, zip, {
+ parentCommentId: nonLeafReply.commentId,
+ author: 'Reply',
+ text: 'Reply level 2',
+ });
+ return { doc, zip, nonLeafReply };
+ });
+
+ await allureStep('When the non-leaf reply is deleted', async () => {
+ await deleteComment(doc, zip, { commentId: nonLeafReply.commentId });
+ });
+
+ await allureStep('Then only the root comment remains with no replies', async () => {
+ const comments = await getComments(zip, doc);
+ expect(comments).toHaveLength(1);
+ expect(comments[0]!.text).toBe('Root comment');
+ expect(comments[0]!.replies).toEqual([]);
+ });
+ });
+
+ it
+ .openspec('comment not found returns error')
+ ('Scenario: comment not found returns error', async () => {
+ const { doc, zip } = await allureStep('Given a document with one comment', async () => {
+ const zip = await loadZip(await makeDocxBuffer('Hello world'));
+ await bootstrapCommentParts(zip);
+ const doc = parseXml(await zip.readText('word/document.xml'));
+ const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ await addComment(doc, zip, {
+ paragraphEl: paragraph,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root comment',
+ });
+ return { doc, zip };
+ });
+
+ await allureStep('When/Then deleting a non-existent comment ID rejects with not-found', async () => {
+ await expect(deleteComment(doc, zip, { commentId: 999 })).rejects.toThrow(/not found/i);
+ });
+ });
+});
diff --git a/packages/docx-core/src/primitives/comments.test.ts b/packages/docx-core/src/primitives/comments.test.ts
index 253e38b..d1e0721 100644
--- a/packages/docx-core/src/primitives/comments.test.ts
+++ b/packages/docx-core/src/primitives/comments.test.ts
@@ -1,9 +1,10 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from '../testing/allure-test.js';
+import { itAllure as it, allureStep } from '../testing/allure-test.js';
import JSZip from 'jszip';
import { parseXml, serializeXml } from './xml.js';
import { OOXML, W } from './namespaces.js';
import { DocxZip } from './zip.js';
+import { DocxDocument } from './document.js';
import {
bootstrapCommentParts,
addComment,
@@ -354,3 +355,605 @@ describe('comments — edge cases and branch coverage', () => {
});
});
});
+
+describe('comments', () => {
+ describe('bootstrapCommentParts', () => {
+ it('creates all required comment parts when none exist', async () => {
+ const buf = await makeDocxBuffer('Hello');
+ const zip = await loadZip(buf);
+
+ expect(zip.hasFile('word/comments.xml')).toBe(false);
+ expect(zip.hasFile('word/commentsExtended.xml')).toBe(false);
+ expect(zip.hasFile('word/people.xml')).toBe(false);
+
+ const result = await bootstrapCommentParts(zip);
+
+ expect(result.partsCreated).toContain('word/comments.xml');
+ expect(result.partsCreated).toContain('word/commentsExtended.xml');
+ expect(result.partsCreated).toContain('word/people.xml');
+ expect(zip.hasFile('word/comments.xml')).toBe(true);
+ expect(zip.hasFile('word/commentsExtended.xml')).toBe(true);
+ expect(zip.hasFile('word/people.xml')).toBe(true);
+ });
+
+ it('is idempotent — does not duplicate parts on second call', async () => {
+ const buf = await makeDocxBuffer('Hello');
+ const zip = await loadZip(buf);
+
+ const first = await bootstrapCommentParts(zip);
+ expect(first.partsCreated.length).toBe(3);
+
+ const second = await bootstrapCommentParts(zip);
+ expect(second.partsCreated.length).toBe(0);
+ });
+
+ it('adds correct Content-Type overrides', async () => {
+ const buf = await makeDocxBuffer('Hello');
+ const zip = await loadZip(buf);
+
+ await bootstrapCommentParts(zip);
+
+ const ctXml = await zip.readText('[Content_Types].xml');
+ expect(ctXml).toContain('word/comments.xml');
+ expect(ctXml).toContain('word/commentsExtended.xml');
+ expect(ctXml).toContain('word/people.xml');
+ });
+
+ it('adds correct relationship entries', async () => {
+ const buf = await makeDocxBuffer('Hello');
+ const zip = await loadZip(buf);
+
+ await bootstrapCommentParts(zip);
+
+ const relsXml = await zip.readText('word/_rels/document.xml.rels');
+ expect(relsXml).toContain('comments.xml');
+ expect(relsXml).toContain('commentsExtended.xml');
+ expect(relsXml).toContain('people.xml');
+ });
+ });
+
+ describe('addComment', () => {
+ it('inserts commentRangeStart/commentRangeEnd markers', async () => {
+ const bodyXml = 'Hello World';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Test Author',
+ text: 'A comment',
+ });
+
+ const serialized = serializeXml(doc);
+ expect(serialized).toContain('commentRangeStart');
+ expect(serialized).toContain('commentRangeEnd');
+ });
+
+ it('inserts commentReference run after range end', async () => {
+ const bodyXml = 'Hello World';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Test Author',
+ text: 'A comment',
+ });
+
+ const serialized = serializeXml(doc);
+ expect(serialized).toContain('commentReference');
+ });
+
+ it('allocates sequential comment IDs', async () => {
+ const bodyXml = 'Hello World Foo';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ const r1 = await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Author A',
+ text: 'First',
+ });
+ const r2 = await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 6,
+ end: 11,
+ author: 'Author B',
+ text: 'Second',
+ });
+
+ expect(r1.commentId).toBe(0);
+ expect(r2.commentId).toBe(1);
+ });
+
+ it('comment body includes annotationRef element', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Test',
+ text: 'Note',
+ });
+
+ const commentsXml = await zip.readText('word/comments.xml');
+ expect(commentsXml).toContain('annotationRef');
+ expect(commentsXml).toContain('Note');
+ });
+
+ it('adds author to people.xml', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Jane Doe',
+ text: 'Hi',
+ });
+
+ const peopleXml = await zip.readText('word/people.xml');
+ expect(peopleXml).toContain('Jane Doe');
+ });
+ });
+
+ describe('addCommentReply', () => {
+ it('links reply to parent via commentsExtended.xml paraIdParent', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ const root = await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root',
+ });
+
+ const reply = await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Replier',
+ text: 'Reply text',
+ });
+
+ expect(reply.parentCommentId).toBe(root.commentId);
+
+ const extXml = await zip.readText('word/commentsExtended.xml');
+ expect(extXml).toContain('paraIdParent');
+ });
+
+ it('reply has no range markers in document body', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ const root = await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root',
+ });
+
+ const beforeXml = serializeXml(doc);
+ const beforeStartCount = (beforeXml.match(/commentRangeStart/g) ?? []).length;
+ const beforeEndCount = (beforeXml.match(/commentRangeEnd/g) ?? []).length;
+
+ await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Replier',
+ text: 'Reply',
+ });
+
+ const afterXml = serializeXml(doc);
+ const afterStartCount = (afterXml.match(/commentRangeStart/g) ?? []).length;
+ const afterEndCount = (afterXml.match(/commentRangeEnd/g) ?? []).length;
+ expect(afterStartCount).toBe(beforeStartCount);
+ expect(afterEndCount).toBe(beforeEndCount);
+ });
+
+ it('supports multiple replies to same parent', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+
+ const root = await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Author',
+ text: 'Root',
+ });
+
+ const r1 = await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Reply1',
+ text: 'First reply',
+ });
+
+ const r2 = await addCommentReply(doc, zip, {
+ parentCommentId: root.commentId,
+ author: 'Reply2',
+ text: 'Second reply',
+ });
+
+ expect(r1.commentId).not.toBe(r2.commentId);
+ expect(r1.parentCommentId).toBe(root.commentId);
+ expect(r2.parentCommentId).toBe(root.commentId);
+
+ const commentsXml = await zip.readText('word/comments.xml');
+ expect(commentsXml).toContain('First reply');
+ expect(commentsXml).toContain('Second reply');
+ });
+ });
+
+ describe('round-trip', () => {
+ it('comment survives toBuffer → reload cycle', async () => {
+ const bodyXml = 'Hello World';
+ const buf = await makeDocxBuffer(bodyXml);
+ const doc = await DocxDocument.load(buf);
+ doc.insertParagraphBookmarks('test_attachment');
+
+ const { paragraphs } = doc.readParagraphs();
+ expect(paragraphs.length).toBeGreaterThan(0);
+ const paraId = paragraphs[0]!.id;
+ expect(paraId).toBeTruthy();
+
+ await doc.addComment({
+ paragraphId: paraId,
+ start: 0,
+ end: 5,
+ author: 'Round Trip',
+ text: 'Survives reload',
+ });
+
+ const { buffer } = await doc.toBuffer();
+
+ const reloadedZip = await DocxZip.load(buffer);
+ const commentsXml = await reloadedZip.readText('word/comments.xml');
+ expect(commentsXml).toContain('Survives reload');
+ expect(commentsXml).toContain('Round Trip');
+ });
+ });
+
+ describe('getComments', () => {
+ it('returns empty array when no comments.xml exists', async () => {
+
+ let comments: Awaited<ReturnType<typeof getComments>>;
+
+ await allureStep('Given a document with no comment parts', async () => {
+ const buf = await makeDocxBuffer('Hello');
+ const zip = await loadZip(buf);
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ comments = await getComments(zip, doc);
+ });
+
+ await allureStep('Then getComments returns an empty array', async () => {
+ expect(comments).toEqual([]);
+ });
+ });
+
+ it('reads comments written by addComment', async () => {
+
+ let zip: DocxZip;
+ let doc: Document;
+ let comments: Awaited<ReturnType<typeof getComments>>;
+
+ await allureStep('Given a document with a comment added via addComment', async () => {
+ const bodyXml = 'Hello World';
+ const buf = await makeDocxBuffer(bodyXml);
+ zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+ const docXml = await zip.readText('word/document.xml');
+ doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ await addComment(doc, zip, {
+ paragraphEl: p,
+ start: 0,
+ end: 5,
+ author: 'Alice',
+ text: 'Nice intro',
+ initials: 'A',
+ });
+ });
+
+ await allureStep('When reading comments via getComments', async () => {
+ comments = await getComments(zip, doc);
+ });
+
+ await allureStep('Then exactly one comment is returned', async () => {
+ expect(comments).toHaveLength(1);
+ });
+
+ await allureStep('And comment ID is 0', async () => {
+ expect(comments[0]!.id).toBe(0);
+ });
+
+ await allureStep('And author is Alice', async () => {
+ expect(comments[0]!.author).toBe('Alice');
+ });
+
+ await allureStep('And text is "Nice intro"', async () => {
+ expect(comments[0]!.text).toBe('Nice intro');
+ });
+
+ await allureStep('And initials is "A"', async () => {
+ expect(comments[0]!.initials).toBe('A');
+ });
+
+ await allureStep('And date is populated', async () => {
+ expect(comments[0]!.date).toBeTruthy();
+ });
+
+ await allureStep('And paragraphId is populated', async () => {
+ expect(comments[0]!.paragraphId).toBeTruthy();
+ });
+
+ await allureStep('And replies array is empty', async () => {
+ expect(comments[0]!.replies).toEqual([]);
+ });
+ });
+
+ it('reads multiple comments', async () => {
+
+ let zip: DocxZip;
+ let doc: Document;
+ let comments: Awaited<ReturnType<typeof getComments>>;
+
+ await allureStep('Given a document with two comments on different ranges', async () => {
+ const bodyXml = 'Hello World Foo';
+ const buf = await makeDocxBuffer(bodyXml);
+ zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+ const docXml = await zip.readText('word/document.xml');
+ doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'Alice', text: 'First comment' });
+ await addComment(doc, zip, { paragraphEl: p, start: 6, end: 11, author: 'Bob', text: 'Second comment' });
+ });
+
+ await allureStep('When reading comments via getComments', async () => {
+ comments = await getComments(zip, doc);
+ });
+
+ await allureStep('Then two comments are returned', async () => {
+ expect(comments).toHaveLength(2);
+ });
+
+ await allureStep('And first comment text is "First comment"', async () => {
+ expect(comments[0]!.text).toBe('First comment');
+ });
+
+ await allureStep('And second comment text is "Second comment"', async () => {
+ expect(comments[1]!.text).toBe('Second comment');
+ });
+ });
+
+ it('builds threaded replies from addCommentReply', async () => {
+
+ let zip: DocxZip;
+ let doc: Document;
+ let comments: Awaited<ReturnType<typeof getComments>>;
+
+ await allureStep('Given a root comment with two replies', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+ const docXml = await zip.readText('word/document.xml');
+ doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ const root = await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'Author', text: 'Root comment' });
+ await addCommentReply(doc, zip, { parentCommentId: root.commentId, author: 'Replier', text: 'Reply one' });
+ await addCommentReply(doc, zip, { parentCommentId: root.commentId, author: 'Replier2', text: 'Reply two' });
+ });
+
+ await allureStep('When reading comments via getComments', async () => {
+ comments = await getComments(zip, doc);
+ });
+
+ await allureStep('Then only one root comment is returned at top level', async () => {
+ expect(comments).toHaveLength(1);
+ });
+
+ await allureStep('And root comment text is "Root comment"', async () => {
+ expect(comments[0]!.text).toBe('Root comment');
+ });
+
+ await allureStep('And root comment has two replies', async () => {
+ expect(comments[0]!.replies).toHaveLength(2);
+ });
+
+ await allureStep('And first reply text is "Reply one" by "Replier"', async () => {
+ expect(comments[0]!.replies[0]!.text).toBe('Reply one');
+ expect(comments[0]!.replies[0]!.author).toBe('Replier');
+ });
+
+ await allureStep('And second reply text is "Reply two"', async () => {
+ expect(comments[0]!.replies[1]!.text).toBe('Reply two');
+ });
+ });
+
+ it('round-trip: write comments, save, reload, read back', async () => {
+
+ let buffer: Buffer;
+ let comments: Awaited<ReturnType<typeof getComments>>;
+
+ await allureStep('Given a document with a comment and a reply', async () => {
+ const bodyXml = 'Hello World';
+ const buf = await makeDocxBuffer(bodyXml);
+ const doc = await DocxDocument.load(buf);
+ doc.insertParagraphBookmarks('test_attachment');
+ const { paragraphs } = doc.readParagraphs();
+ const paraId = paragraphs[0]!.id;
+ await doc.addComment({ paragraphId: paraId, start: 0, end: 5, author: 'RoundTrip Author', text: 'Round trip comment' });
+ const replyResult = await doc.addCommentReply({ parentCommentId: 0, author: 'Reply Author', text: 'Round trip reply' });
+ expect(replyResult.parentCommentId).toBe(0);
+ ({ buffer } = await doc.toBuffer());
+ });
+
+ await allureStep('When reloading from buffer and reading comments', async () => {
+ const reloaded = await DocxDocument.load(buffer);
+ comments = await reloaded.getComments();
+ });
+
+ await allureStep('Then one root comment is returned', async () => {
+ expect(comments).toHaveLength(1);
+ });
+
+ await allureStep('And root comment text matches "Round trip comment"', async () => {
+ expect(comments[0]!.text).toBe('Round trip comment');
+ });
+
+ await allureStep('And root comment author matches "RoundTrip Author"', async () => {
+ expect(comments[0]!.author).toBe('RoundTrip Author');
+ });
+
+ await allureStep('And reply is preserved with correct text', async () => {
+ expect(comments[0]!.replies).toHaveLength(1);
+ expect(comments[0]!.replies[0]!.text).toBe('Round trip reply');
+ });
+ });
+ });
+
+ describe('getComment', () => {
+ it('finds a root comment by ID', async () => {
+
+ let zip: DocxZip;
+ let doc: Document;
+ let found: Awaited<ReturnType<typeof getComment>>;
+
+ await allureStep('Given a document with one comment (ID 0)', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+ const docXml = await zip.readText('word/document.xml');
+ doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'FindMe', text: 'Target comment' });
+ });
+
+ await allureStep('When looking up comment by ID 0', async () => {
+ found = await getComment(zip, doc, 0);
+ });
+
+ await allureStep('Then the comment is found', async () => {
+ expect(found).not.toBeNull();
+ });
+
+ await allureStep('And text is "Target comment"', async () => {
+ expect(found!.text).toBe('Target comment');
+ });
+
+ await allureStep('And author is "FindMe"', async () => {
+ expect(found!.author).toBe('FindMe');
+ });
+ });
+
+ it('finds a reply comment by ID', async () => {
+
+ let zip: DocxZip;
+ let doc: Document;
+ let replyId: number;
+ let found: Awaited<ReturnType<typeof getComment>>;
+
+ await allureStep('Given a root comment with a reply', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+ const docXml = await zip.readText('word/document.xml');
+ doc = parseXml(docXml);
+ const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
+ const root = await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'Root', text: 'Root' });
+ const reply = await addCommentReply(doc, zip, { parentCommentId: root.commentId, author: 'Reply', text: 'Nested reply' });
+ replyId = reply.commentId;
+ });
+
+ await allureStep('When looking up the reply by its ID', async () => {
+ found = await getComment(zip, doc, replyId);
+ });
+
+ await allureStep('Then the reply is found', async () => {
+ expect(found).not.toBeNull();
+ });
+
+ await allureStep('And text is "Nested reply"', async () => {
+ expect(found!.text).toBe('Nested reply');
+ });
+ });
+
+ it('returns null for non-existent ID', async () => {
+
+ let found: Awaited<ReturnType<typeof getComment>>;
+
+ await allureStep('Given a document with no comments', async () => {
+ const bodyXml = 'Hello';
+ const buf = await makeDocxBuffer(bodyXml);
+ const zip = await loadZip(buf);
+ await bootstrapCommentParts(zip);
+ const docXml = await zip.readText('word/document.xml');
+ const doc = parseXml(docXml);
+ found = await getComment(zip, doc, 999);
+ });
+
+ await allureStep('Then getComment returns null', async () => {
+ expect(found).toBeNull();
+ });
+ });
+ });
+});
diff --git a/packages/docx-core/test-primitives/document.branch.test.ts b/packages/docx-core/src/primitives/document.branch.test.ts
similarity index 96%
rename from packages/docx-core/test-primitives/document.branch.test.ts
rename to packages/docx-core/src/primitives/document.branch.test.ts
index 79ef77d..cc533a7 100644
--- a/packages/docx-core/test-primitives/document.branch.test.ts
+++ b/packages/docx-core/src/primitives/document.branch.test.ts
@@ -1,9 +1,9 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
+import { itAllure as it } from '../testing/allure-test.js';
import JSZip from 'jszip';
-import { DocxDocument } from '../src/primitives/document.js';
-import { DocxZip } from '../src/primitives/zip.js';
-import { getParagraphBookmarkId } from '../src/primitives/bookmarks.js';
+import { DocxDocument } from './document.js';
+import { DocxZip } from './zip.js';
+import { getParagraphBookmarkId } from './bookmarks.js';
const test = it.epic('DOCX Primitives').withLabels({ feature: 'Document' });
diff --git a/packages/docx-core/test-primitives/document.test.ts b/packages/docx-core/src/primitives/document.test.ts
similarity index 97%
rename from packages/docx-core/test-primitives/document.test.ts
rename to packages/docx-core/src/primitives/document.test.ts
index 6cd47ab..8dff938 100644
--- a/packages/docx-core/test-primitives/document.test.ts
+++ b/packages/docx-core/src/primitives/document.test.ts
@@ -1,8 +1,8 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
+import { itAllure as it } from '../testing/allure-test.js';
import JSZip from 'jszip';
-import { DocxDocument } from '../src/primitives/document.js';
-import { DocxZip } from '../src/primitives/zip.js';
+import { DocxDocument } from './document.js';
+import { DocxZip } from './zip.js';
const W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main';
diff --git a/packages/docx-core/test-primitives/insert_paragraph_style_source.traceability.test.ts b/packages/docx-core/src/primitives/document.traceability.test.ts
similarity index 98%
rename from packages/docx-core/test-primitives/insert_paragraph_style_source.traceability.test.ts
rename to packages/docx-core/src/primitives/document.traceability.test.ts
index eefbc4d..3b50aac 100644
--- a/packages/docx-core/test-primitives/insert_paragraph_style_source.traceability.test.ts
+++ b/packages/docx-core/src/primitives/document.traceability.test.ts
@@ -1,8 +1,8 @@
import { describe, expect } from 'vitest';
import JSZip from 'jszip';
-import { itAllure as it, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
-import { DocxDocument } from '../src/primitives/document.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
+import { itAllure as it, allureStep, allureJsonAttachment } from '../testing/allure-test.js';
+import { DocxDocument } from './document.js';
+import { OOXML, W } from './namespaces.js';
const TEST_FEATURE = 'add-apply-plan-and-style-source';
const test = it.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
diff --git a/packages/docx-core/test-primitives/document_view.branch.test.ts b/packages/docx-core/src/primitives/document_view.branch.test.ts
similarity index 97%
rename from packages/docx-core/test-primitives/document_view.branch.test.ts
rename to packages/docx-core/src/primitives/document_view.branch.test.ts
index 67c2a92..6e72a02 100644
--- a/packages/docx-core/test-primitives/document_view.branch.test.ts
+++ b/packages/docx-core/src/primitives/document_view.branch.test.ts
@@ -1,14 +1,14 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
+import { itAllure as it } from '../testing/allure-test.js';
import {
buildDocumentView,
buildNodesForDocumentView,
discoverStyles,
renderToon,
type DocumentViewNode,
-} from '../src/primitives/document_view.js';
-import { LabelType } from '../src/primitives/list_labels.js';
-import { parseXml } from '../src/primitives/xml.js';
+} from './document_view.js';
+import { LabelType } from './list_labels.js';
+import { parseXml } from './xml.js';
const test = it.epic('DOCX Primitives').withLabels({ feature: 'Document View' });
diff --git a/packages/docx-core/test-primitives/document_view_formatting.test.ts b/packages/docx-core/src/primitives/document_view.formatting.test.ts
similarity index 51%
rename from packages/docx-core/test-primitives/document_view_formatting.test.ts
rename to packages/docx-core/src/primitives/document_view.formatting.test.ts
index e658e84..755d05a 100644
--- a/packages/docx-core/test-primitives/document_view_formatting.test.ts
+++ b/packages/docx-core/src/primitives/document_view.formatting.test.ts
@@ -1,9 +1,8 @@
import { describe, expect } from 'vitest';
-import { testAllure } from './helpers/allure-test.js';
-import { buildNodesForDocumentView, renderToon } from '../src/primitives/document_view.js';
-import { parseXml } from '../src/primitives/xml.js';
-import type { RelsMap } from '../src/primitives/relationships.js';
-import { computeModalBaseline, type AnnotatedRun } from '../src/primitives/formatting_tags.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
+import { buildNodesForDocumentView, renderToon } from './document_view.js';
+import { parseXml } from './xml.js';
+import type { RelsMap } from './relationships.js';
const TEST_FEATURE = 'add-run-level-formatting-visibility';
const test = testAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
@@ -68,29 +67,33 @@ describe('document_view formatting tags', () => {
humanReadableTest.openspec('extract bold, italic, underline, highlight tuple per run')(
'extract bold, italic, underline, highlight tuple per run',
- () => {
- const bodyXml =
- `` +
- `BBBBBBBBBBBBBBBBBBBB` +
- `IIIIIIIIIIIIIIIIIIII` +
- `UUUUUUUUUUUUUUUUUUUU` +
- `HHHHHHHHHHHHHHHHHHHH` +
- `PPPPPPPPPPPPPPPPPPPP` +
- ``;
- const paragraphs = makeParagraphs(bodyXml);
- const { nodes } = buildNodesForDocumentView({
- paragraphs,
- stylesXml: null,
- numberingXml: null,
- show_formatting: true,
- include_semantic_tags: true,
+ async () => {
+ const nodes = await allureStep('Given a paragraph with bold, italic, underline, highlight, and plain runs', () => {
+ const bodyXml =
+ `` +
+ `BBBBBBBBBBBBBBBBBBBB` +
+ `IIIIIIIIIIIIIIIIIIII` +
+ `UUUUUUUUUUUUUUUUUUUU` +
+ `HHHHHHHHHHHHHHHHHHHH` +
+ `PPPPPPPPPPPPPPPPPPPP` +
+ ``;
+ const paragraphs = makeParagraphs(bodyXml);
+ return buildNodesForDocumentView({
+ paragraphs,
+ stylesXml: null,
+ numberingXml: null,
+ show_formatting: true,
+ include_semantic_tags: true,
+ }).nodes;
});
- expect(nodes.length).toBe(1);
- expect(nodes[0]!.tagged_text).toContain('BBBBBBBBBBBBBBBBBBBB');
- expect(nodes[0]!.tagged_text).toContain('IIIIIIIIIIIIIIIIIIII');
- expect(nodes[0]!.tagged_text).toContain('UUUUUUUUUUUUUUUUUUUU');
- expect(nodes[0]!.tagged_text).toContain('HHHHHHHHHHHHHHHHHHHH');
+ await allureStep('Then each formatting type emits its own inline tag', () => {
+ expect(nodes.length).toBe(1);
+ expect(nodes[0]!.tagged_text).toContain('BBBBBBBBBBBBBBBBBBBB');
+ expect(nodes[0]!.tagged_text).toContain('IIIIIIIIIIIIIIIIIIII');
+ expect(nodes[0]!.tagged_text).toContain('UUUUUUUUUUUUUUUUUUUU');
+ expect(nodes[0]!.tagged_text).toContain('HHHHHHHHHHHHHHHHHHHH');
+ });
},
);
@@ -114,85 +117,33 @@ describe('document_view formatting tags', () => {
expect(nodes[0]!.tagged_text).not.toContain('');
});
- humanReadableTest.openspec('char-weighted modal baseline selects dominant formatting tuple')(
- 'char-weighted modal baseline selects dominant formatting tuple',
- () => {
- const runs: AnnotatedRun[] = [
- {
- text: 'AAAAAAAAAA',
- formatting: { bold: true, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
- hyperlinkUrl: null,
- charCount: 10,
- isHeaderRun: false,
- },
- {
- text: 'BBBB',
- formatting: { bold: false, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
- hyperlinkUrl: null,
- charCount: 4,
- isHeaderRun: false,
- },
- ];
-
- const baseline = computeModalBaseline(runs);
-
- expect(baseline.bold).toBe(true);
- expect(baseline.italic).toBe(false);
- expect(baseline.underline).toBe(false);
- expect(baseline.suppressed).toBe(true);
- },
- );
-
- humanReadableTest.openspec('tie-break by earliest run when modal weights are equal')(
- 'tie-break by earliest run when modal weights are equal',
- () => {
- const runs: AnnotatedRun[] = [
- {
- text: 'AAAAAA',
- formatting: { bold: true, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
- hyperlinkUrl: null,
- charCount: 6,
- isHeaderRun: false,
- },
- {
- text: 'BBBBBB',
- formatting: { bold: false, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
- hyperlinkUrl: null,
- charCount: 6,
- isHeaderRun: false,
- },
- ];
-
- const baseline = computeModalBaseline(runs);
-
- expect(baseline.bold).toBe(true);
- expect(baseline.suppressed).toBe(false);
- },
- );
-
humanReadableTest.openspec('detect hyperlink runs and extract href')(
'detect hyperlink runs and extract href',
- () => {
- const relsMap: RelsMap = new Map([['rId1', 'https://example.com']]);
- const bodyXml =
- `` +
- `Click ` +
- `here` +
- ` for details.` +
- ``;
- const paragraphs = makeParagraphs(bodyXml);
- const { nodes } = buildNodesForDocumentView({
- paragraphs,
- stylesXml: null,
- numberingXml: null,
- show_formatting: true,
- include_semantic_tags: true,
- relsMap,
+ async () => {
+ const nodes = await allureStep('Given a paragraph with a hyperlink run referencing rId1', () => {
+ const relsMap: RelsMap = new Map([['rId1', 'https://example.com']]);
+ const bodyXml =
+ `` +
+ `Click ` +
+ `here` +
+ ` for details.` +
+ ``;
+ const paragraphs = makeParagraphs(bodyXml);
+ return buildNodesForDocumentView({
+ paragraphs,
+ stylesXml: null,
+ numberingXml: null,
+ show_formatting: true,
+ include_semantic_tags: true,
+ relsMap,
+ }).nodes;
});
- expect(nodes.length).toBe(1);
- expect(nodes[0]!.tagged_text).toContain('here');
- expect(nodes[0]!.tagged_text).toContain('Click ');
+ await allureStep('Then the hyperlink is emitted as an <a> tag with the resolved href', () => {
+ expect(nodes.length).toBe(1);
+ expect(nodes[0]!.tagged_text).toContain('here');
+ expect(nodes[0]!.tagged_text).toContain('Click ');
+ });
},
);
@@ -268,75 +219,79 @@ describe('document_view formatting tags', () => {
humanReadableTest.openspec('suppression disabled when baseline coverage below 60%')(
'suppression disabled when baseline coverage below 60%',
- () => {
- // 59 chars plain + 41 chars bold = 59% plain = suppressed=false (because 59 < 60 threshold)
- // Actually 59% IS < 60%, so suppressed should be false.
+ async () => {
const plain59 = 'A'.repeat(59);
const bold41 = 'B'.repeat(41);
- const bodyXml59 =
- `` +
- `${plain59}` +
- `${bold41}` +
- ``;
- const paragraphs59 = makeParagraphs(bodyXml59);
- const { nodes: nodes59 } = buildNodesForDocumentView({
- paragraphs: paragraphs59,
- stylesXml: null,
- numberingXml: null,
- show_formatting: true,
- include_semantic_tags: true,
+ const plain61 = 'A'.repeat(61);
+ const bold39 = 'B'.repeat(39);
+
+ const nodes59 = await allureStep('Given a paragraph with 59% plain and 41% bold (below 60% threshold)', () => {
+ const bodyXml59 =
+ `` +
+ `${plain59}` +
+ `${bold41}` +
+ ``;
+ const paragraphs59 = makeParagraphs(bodyXml59);
+ return buildNodesForDocumentView({
+ paragraphs: paragraphs59,
+ stylesXml: null,
+ numberingXml: null,
+ show_formatting: true,
+ include_semantic_tags: true,
+ }).nodes;
});
- // With 59% plain, suppressed=false → all runs get absolute tags.
- // The plain text gets no tags (bold=false), the bold text gets .
- expect(nodes59[0]!.tagged_text).toContain(`${bold41}`);
+ await allureStep('Then suppression is disabled and bold text gets tags', () => {
+ expect(nodes59[0]!.tagged_text).toContain(`${bold41}`);
+ });
- // 61 chars plain + 39 chars bold = 61% plain → suppressed=true
- const plain61 = 'A'.repeat(61);
- const bold39 = 'B'.repeat(39);
- const bodyXml61 =
- `` +
- `${plain61}` +
- `${bold39}` +
- ``;
- const paragraphs61 = makeParagraphs(bodyXml61);
- const { nodes: nodes61 } = buildNodesForDocumentView({
- paragraphs: paragraphs61,
- stylesXml: null,
- numberingXml: null,
- show_formatting: true,
- include_semantic_tags: true,
+ const nodes61 = await allureStep('Given a paragraph with 61% plain and 39% bold (above 60% threshold)', () => {
+ const bodyXml61 =
+ `` +
+ `${plain61}` +
+ `${bold39}` +
+ ``;
+ const paragraphs61 = makeParagraphs(bodyXml61);
+ return buildNodesForDocumentView({
+ paragraphs: paragraphs61,
+ stylesXml: null,
+ numberingXml: null,
+ show_formatting: true,
+ include_semantic_tags: true,
+ }).nodes;
});
- // With 61% plain, suppressed=true → only deviations get tags.
- // Bold is a deviation, so it gets .
- expect(nodes61[0]!.tagged_text).toContain(`${bold39}`);
- // The plain portion should NOT be tagged.
- expect(nodes61[0]!.tagged_text).not.toMatch(new RegExp(`${plain61}`));
+ await allureStep('Then suppression is enabled — only deviations get tags', () => {
+ expect(nodes61[0]!.tagged_text).toContain(`${bold39}`);
+ expect(nodes61[0]!.tagged_text).not.toMatch(new RegExp(`${plain61}`));
+ });
},
);
humanReadableTest.openspec('tags nested in consistent order')(
'tags nested in consistent order',
- () => {
- const bodyXml =
- `` +
- `Start text for baseline padding longer text. ` +
- `styled` +
- ` end.` +
- ``;
- const paragraphs = makeParagraphs(bodyXml);
- const { nodes } = buildNodesForDocumentView({
- paragraphs,
- stylesXml: null,
- numberingXml: null,
- show_formatting: true,
- include_semantic_tags: true,
+ async () => {
+ const nodes = await allureStep('Given a paragraph with a run having both italic and underline', () => {
+ const bodyXml =
+ `` +
+ `Start text for baseline padding longer text. ` +
+ `styled` +
+ ` end.` +
+ ``;
+ const paragraphs = makeParagraphs(bodyXml);
+ return buildNodesForDocumentView({
+ paragraphs,
+ stylesXml: null,
+ numberingXml: null,
+ show_formatting: true,
+ include_semantic_tags: true,
+ }).nodes;
});
- expect(nodes.length).toBe(1);
- // Nesting order: → → → →
- expect(nodes[0]!.tagged_text).toContain('styled');
+ await allureStep('Then tags are nested in canonical order: → → → → ', () => {
+ expect(nodes.length).toBe(1);
+ expect(nodes[0]!.tagged_text).toContain('styled');
+ });
},
);
});
diff --git a/packages/docx-core/test-primitives/document_view.test.ts b/packages/docx-core/src/primitives/document_view.test.ts
similarity index 94%
rename from packages/docx-core/test-primitives/document_view.test.ts
rename to packages/docx-core/src/primitives/document_view.test.ts
index e6f1a86..8995feb 100644
--- a/packages/docx-core/test-primitives/document_view.test.ts
+++ b/packages/docx-core/src/primitives/document_view.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { renderToon, type DocumentViewNode } from '../src/primitives/document_view.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { renderToon, type DocumentViewNode } from './document_view.js';
function makeNode(overrides: Partial): DocumentViewNode {
return {
diff --git a/packages/docx-core/test-primitives/dom-helpers.test.ts b/packages/docx-core/src/primitives/dom-helpers.test.ts
similarity index 97%
rename from packages/docx-core/test-primitives/dom-helpers.test.ts
rename to packages/docx-core/src/primitives/dom-helpers.test.ts
index d68e4ab..41f6209 100644
--- a/packages/docx-core/test-primitives/dom-helpers.test.ts
+++ b/packages/docx-core/src/primitives/dom-helpers.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml, serializeXml } from './xml.js';
import {
getLeafText,
setLeafText,
@@ -15,7 +15,7 @@ import {
createWmlElement,
createWmlTextElement,
NODE_TYPE,
-} from '../src/primitives/dom-helpers.js';
+} from './dom-helpers.js';
// ── Helpers ────────────────────────────────────────────────────────
@@ -27,9 +27,6 @@ function makeDoc(bodyXml: string): Document {
);
}
-function getBody(doc: Document): Element {
- return doc.getElementsByTagName('w:body')[0]!;
-}
// ── getLeafText ────────────────────────────────────────────────────
@@ -233,7 +230,6 @@ describe('unwrapAllByTagName', () => {
const doc = makeDoc(
'AB',
);
- const body = getBody(doc);
const wp = doc.getElementsByTagName('w:p')[0]!;
const count = unwrapAllByTagName(wp, 'w:ins');
expect(count).toBe(2);
diff --git a/packages/docx-core/test-primitives/extract_revisions.test.ts b/packages/docx-core/src/primitives/extract_revisions.test.ts
similarity index 96%
rename from packages/docx-core/test-primitives/extract_revisions.test.ts
rename to packages/docx-core/src/primitives/extract_revisions.test.ts
index 7ba109b..e4f1596 100644
--- a/packages/docx-core/test-primitives/extract_revisions.test.ts
+++ b/packages/docx-core/src/primitives/extract_revisions.test.ts
@@ -1,10 +1,10 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML } from '../src/primitives/namespaces.js';
-import { extractRevisions } from '../src/primitives/extract_revisions.js';
-import { insertParagraphBookmarks } from '../src/primitives/bookmarks.js';
-import type { Comment } from '../src/primitives/comments.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml } from './xml.js';
+import { OOXML } from './namespaces.js';
+import { extractRevisions } from './extract_revisions.js';
+import { insertParagraphBookmarks } from './bookmarks.js';
+import type { Comment } from './comments.js';
const W_NS = OOXML.W_NS;
diff --git a/packages/docx-core/test-primitives/footnotes.test.ts b/packages/docx-core/src/primitives/footnotes.test.ts
similarity index 98%
rename from packages/docx-core/test-primitives/footnotes.test.ts
rename to packages/docx-core/src/primitives/footnotes.test.ts
index 297f915..d808846 100644
--- a/packages/docx-core/test-primitives/footnotes.test.ts
+++ b/packages/docx-core/src/primitives/footnotes.test.ts
@@ -1,7 +1,7 @@
import { describe, expect } from 'vitest';
import JSZip from 'jszip';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
+import { OOXML, W } from './namespaces.js';
+import { parseXml, serializeXml } from './xml.js';
import {
addFootnote,
bootstrapFootnoteParts,
@@ -10,11 +10,11 @@ import {
getFootnotes,
isReservedFootnote,
updateFootnoteText,
-} from '../src/primitives/footnotes.js';
-import { DocxZip } from '../src/primitives/zip.js';
-import { getParagraphBookmarkId, insertParagraphBookmarks } from '../src/primitives/bookmarks.js';
-import { getParagraphText } from '../src/primitives/text.js';
-import { allureJsonAttachment, allureStep, testAllure } from './helpers/allure-test.js';
+} from './footnotes.js';
+import { DocxZip } from './zip.js';
+import { getParagraphBookmarkId, insertParagraphBookmarks } from './bookmarks.js';
+import { getParagraphText } from './text.js';
+import { allureJsonAttachment, allureStep, testAllure } from './testing/allure-test.js';
const TEST_FEATURE = 'add-footnote-support';
const test = testAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
diff --git a/packages/docx-core/test-primitives/formatting_tags.test.ts b/packages/docx-core/src/primitives/formatting_tags.test.ts
similarity index 73%
rename from packages/docx-core/test-primitives/formatting_tags.test.ts
rename to packages/docx-core/src/primitives/formatting_tags.test.ts
index 0000aeb..a828cea 100644
--- a/packages/docx-core/test-primitives/formatting_tags.test.ts
+++ b/packages/docx-core/src/primitives/formatting_tags.test.ts
@@ -1,5 +1,7 @@
import { describe, expect } from 'vitest';
-import { testAllure as test } from './helpers/allure-test.js';
+import { testAllure, allureStep } from './testing/allure-test.js';
+
+const test = testAllure;
import {
computeModalBaseline,
@@ -7,9 +9,8 @@ import {
emitFormattingTags,
type AnnotatedRun,
type FormattingBaseline,
- type FontBaseline,
-} from '../src/primitives/formatting_tags.js';
-import type { RunFormatting } from '../src/primitives/styles.js';
+} from './formatting_tags.js';
+import type { RunFormatting } from './styles.js';
function runFormatting(partial?: Partial): RunFormatting {
return {
@@ -220,3 +221,74 @@ describe('formatting_tags', () => {
expect(tagged).not.toContain('face=');
});
});
+
+const TEST_FEATURE = 'add-run-level-formatting-visibility';
+
+const humanReadableTest = testAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE }).allure({
+ tags: ['human-readable'],
+ parameters: { audience: 'non-technical' },
+});
+
+describe('formatting_tags — OpenSpec traceability', () => {
+ humanReadableTest.openspec('char-weighted modal baseline selects dominant formatting tuple')(
+ 'char-weighted modal baseline selects dominant formatting tuple',
+ async () => {
+ const { baseline } = await allureStep('Given 10 bold chars and 4 plain chars', () => {
+ const r: AnnotatedRun[] = [
+ {
+ text: 'AAAAAAAAAA',
+ formatting: { bold: true, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
+ hyperlinkUrl: null,
+ charCount: 10,
+ isHeaderRun: false,
+ },
+ {
+ text: 'BBBB',
+ formatting: { bold: false, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
+ hyperlinkUrl: null,
+ charCount: 4,
+ isHeaderRun: false,
+ },
+ ];
+ return { runs: r, baseline: computeModalBaseline(r) };
+ });
+
+ await allureStep('Then the modal baseline selects bold=true (dominant) with suppression enabled', () => {
+ expect(baseline.bold).toBe(true);
+ expect(baseline.italic).toBe(false);
+ expect(baseline.underline).toBe(false);
+ expect(baseline.suppressed).toBe(true);
+ });
+ },
+ );
+
+ humanReadableTest.openspec('tie-break by earliest run when modal weights are equal')(
+ 'tie-break by earliest run when modal weights are equal',
+ async () => {
+ const baseline = await allureStep('Given two runs with equal char weight (6 bold + 6 plain)', () => {
+ const runs: AnnotatedRun[] = [
+ {
+ text: 'AAAAAA',
+ formatting: { bold: true, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
+ hyperlinkUrl: null,
+ charCount: 6,
+ isHeaderRun: false,
+ },
+ {
+ text: 'BBBBBB',
+ formatting: { bold: false, italic: false, underline: false, highlightVal: null, fontName: 'Arial', fontSizePt: 12, colorHex: null },
+ hyperlinkUrl: null,
+ charCount: 6,
+ isHeaderRun: false,
+ },
+ ];
+ return computeModalBaseline(runs);
+ });
+
+ await allureStep('Then tie-break selects earliest run (bold=true) with suppression disabled', () => {
+ expect(baseline.bold).toBe(true);
+ expect(baseline.suppressed).toBe(false);
+ });
+ },
+ );
+});
diff --git a/packages/docx-core/test-primitives/index.test.ts b/packages/docx-core/src/primitives/index.test.ts
similarity index 81%
rename from packages/docx-core/test-primitives/index.test.ts
rename to packages/docx-core/src/primitives/index.test.ts
index 7ba8f73..17edbff 100644
--- a/packages/docx-core/test-primitives/index.test.ts
+++ b/packages/docx-core/src/primitives/index.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import * as primitives from '../src/primitives/index.js';
+import { itAllure as it } from './testing/allure-test.js';
+import * as primitives from './index.js';
describe('index exports', () => {
it('re-exports key public surface symbols', () => {
diff --git a/packages/docx-core/test-primitives/layout.test.ts b/packages/docx-core/src/primitives/layout.test.ts
similarity index 95%
rename from packages/docx-core/test-primitives/layout.test.ts
rename to packages/docx-core/src/primitives/layout.test.ts
index 142dccb..cedd18b 100644
--- a/packages/docx-core/test-primitives/layout.test.ts
+++ b/packages/docx-core/src/primitives/layout.test.ts
@@ -1,9 +1,9 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { getParagraphBookmarkId, insertParagraphBookmarks } from '../src/primitives/bookmarks.js';
-import { setParagraphSpacing, setTableCellPadding, setTableRowHeight } from '../src/primitives/layout.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml } from './xml.js';
+import { OOXML, W } from './namespaces.js';
+import { getParagraphBookmarkId, insertParagraphBookmarks } from './bookmarks.js';
+import { setParagraphSpacing, setTableCellPadding, setTableRowHeight } from './layout.js';
function makeDoc(bodyXml: string): Document {
const xml =
diff --git a/packages/docx-core/test-primitives/layout.traceability.test.ts b/packages/docx-core/src/primitives/layout.traceability.test.ts
similarity index 95%
rename from packages/docx-core/test-primitives/layout.traceability.test.ts
rename to packages/docx-core/src/primitives/layout.traceability.test.ts
index 7c01c73..4d1cb7e 100644
--- a/packages/docx-core/test-primitives/layout.traceability.test.ts
+++ b/packages/docx-core/src/primitives/layout.traceability.test.ts
@@ -1,9 +1,9 @@
import { describe, expect } from 'vitest';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { getParagraphBookmarkId, insertParagraphBookmarks } from '../src/primitives/bookmarks.js';
-import { setParagraphSpacing, setTableCellPadding, setTableRowHeight } from '../src/primitives/layout.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
+import { parseXml, serializeXml } from './xml.js';
+import { OOXML, W } from './namespaces.js';
+import { getParagraphBookmarkId, insertParagraphBookmarks } from './bookmarks.js';
+import { setParagraphSpacing, setTableCellPadding, setTableRowHeight } from './layout.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
const TEST_FEATURE = 'docx-primitives';
diff --git a/packages/docx-core/test-primitives/list_labels.test.ts b/packages/docx-core/src/primitives/list_labels.test.ts
similarity index 95%
rename from packages/docx-core/test-primitives/list_labels.test.ts
rename to packages/docx-core/src/primitives/list_labels.test.ts
index 048e8dc..df6f491 100644
--- a/packages/docx-core/test-primitives/list_labels.test.ts
+++ b/packages/docx-core/src/primitives/list_labels.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { extractListLabel, stripListLabel, LabelType } from '../src/primitives/list_labels.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { extractListLabel, stripListLabel, LabelType } from './list_labels.js';
describe('extractListLabel', () => {
// ── Letter labels ────────────────────────────────────────────────────
diff --git a/packages/docx-core/test-primitives/list_labels.traceability.test.ts b/packages/docx-core/src/primitives/list_labels.traceability.test.ts
similarity index 96%
rename from packages/docx-core/test-primitives/list_labels.traceability.test.ts
rename to packages/docx-core/src/primitives/list_labels.traceability.test.ts
index a9032cf..bb19edb 100644
--- a/packages/docx-core/test-primitives/list_labels.traceability.test.ts
+++ b/packages/docx-core/src/primitives/list_labels.traceability.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { extractListLabel, stripListLabel, LabelType } from '../src/primitives/list_labels.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
+import { extractListLabel, stripListLabel, LabelType } from './list_labels.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
const TEST_FEATURE = 'docx-primitives';
diff --git a/packages/docx-core/test-primitives/matching.test.ts b/packages/docx-core/src/primitives/matching.test.ts
similarity index 95%
rename from packages/docx-core/test-primitives/matching.test.ts
rename to packages/docx-core/src/primitives/matching.test.ts
index 76d00b3..3ff05f6 100644
--- a/packages/docx-core/test-primitives/matching.test.ts
+++ b/packages/docx-core/src/primitives/matching.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { findUniqueSubstringMatch } from '../src/primitives/matching.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { findUniqueSubstringMatch } from './matching.js';
describe('findUniqueSubstringMatch', () => {
// ── Exact mode ───────────────────────────────────────────────────────
diff --git a/packages/docx-core/test-primitives/matching.traceability.test.ts b/packages/docx-core/src/primitives/matching.traceability.test.ts
similarity index 97%
rename from packages/docx-core/test-primitives/matching.traceability.test.ts
rename to packages/docx-core/src/primitives/matching.traceability.test.ts
index 0bba54a..0cd0b4f 100644
--- a/packages/docx-core/test-primitives/matching.traceability.test.ts
+++ b/packages/docx-core/src/primitives/matching.traceability.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { findUniqueSubstringMatch } from '../src/primitives/matching.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
+import { findUniqueSubstringMatch } from './matching.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
const TEST_FEATURE = 'docx-primitives';
diff --git a/packages/docx-core/test-primitives/merge_runs.test.ts b/packages/docx-core/src/primitives/merge_runs.test.ts
similarity index 61%
rename from packages/docx-core/test-primitives/merge_runs.test.ts
rename to packages/docx-core/src/primitives/merge_runs.test.ts
index e9c36cc..05067f5 100644
--- a/packages/docx-core/test-primitives/merge_runs.test.ts
+++ b/packages/docx-core/src/primitives/merge_runs.test.ts
@@ -1,8 +1,10 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { mergeRuns } from '../src/primitives/merge_runs.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
+
+const it = itAllure;
+import { parseXml } from './xml.js';
+import { OOXML, W } from './namespaces.js';
+import { mergeRuns } from './merge_runs.js';
const W_NS = OOXML.W_NS;
@@ -306,3 +308,136 @@ describe('merge_runs', () => {
});
});
});
+
+const TEST_FEATURE = 'add-auto-normalization-on-open';
+
+const humanReadableIt = itAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE }).allure({
+ tags: ['human-readable'],
+ parameters: { audience: 'non-technical' },
+});
+
+describe('Traceability: Auto-Normalization on Open — Run Merging', () => {
+ humanReadableIt.openspec('merge adjacent runs with equivalent formatting')('Scenario: merge adjacent runs with equivalent formatting', async () => {
+ const doc = makeDoc(
+ '' +
+ 'Hello ' +
+ 'World' +
+ '',
+ );
+
+ const result = await allureStep('When merge_runs is called on adjacent format-identical runs', async () => {
+ const r = mergeRuns(doc);
+ await allureJsonAttachment('merge_runs result', r);
+ return r;
+ });
+
+ await allureStep('Then the adjacent runs SHALL be consolidated into a single run', () => {
+ expect(result.runsMerged).toBeGreaterThanOrEqual(1);
+ expect(countRuns(doc)).toBe(1);
+ });
+
+ await allureStep('And the merged run SHALL preserve the original visible text and formatting', () => {
+ expect(bodyText(doc)).toBe('Hello World');
+ const rPr = doc.getElementsByTagNameNS(W_NS, W.rPr).item(0);
+ expect(rPr).toBeTruthy();
+ expect(rPr!.getElementsByTagNameNS(W_NS, W.b).length).toBe(1);
+ });
+ });
+
+ humanReadableIt.openspec('never merge across field boundaries')('Scenario: never merge across field boundaries', async () => {
+ const doc = makeDoc(
+ '' +
+ 'Before' +
+ '' +
+ ' MERGEFIELD Name ' +
+ '' +
+ 'Value' +
+ '' +
+ 'After' +
+ '',
+ );
+
+ const result = await allureStep('When merge_runs is called on runs separated by fldChar elements', async () => {
+ const r = mergeRuns(doc);
+ await allureJsonAttachment('merge_runs result', r);
+ return r;
+ });
+
+ await allureStep('Then the runs SHALL NOT be merged across the field boundary', () => {
+ expect(result.runsMerged).toBe(0);
+ });
+
+ await allureStep('And field structure SHALL remain intact', () => {
+ expect(countRuns(doc)).toBe(7);
+ expect(bodyText(doc)).toContain('Before');
+ expect(bodyText(doc)).toContain('After');
+ });
+ });
+
+ humanReadableIt.openspec('never merge across comment range boundaries')('Scenario: never merge across comment range boundaries', async () => {
+ const doc = makeDoc(
+ '' +
+ 'Before' +
+ '' +
+ 'After' +
+ '' +
+ '',
+ );
+
+ const result = await allureStep('When merge_runs is called on runs separated by comment range markers', async () => {
+ const r = mergeRuns(doc);
+ await allureJsonAttachment('merge_runs result', r);
+ return r;
+ });
+
+ await allureStep('Then the runs SHALL NOT be merged across comment range boundaries', () => {
+ expect(result.runsMerged).toBe(0);
+ expect(countRuns(doc)).toBe(2);
+ });
+ });
+
+ humanReadableIt.openspec('never merge across bookmark boundaries')('Scenario: never merge across bookmark boundaries', async () => {
+ const doc = makeDoc(
+ '' +
+ 'Before' +
+ '' +
+ 'After' +
+ '' +
+ '',
+ );
+
+ const result = await allureStep('When merge_runs is called on runs separated by bookmark markers', async () => {
+ const r = mergeRuns(doc);
+ await allureJsonAttachment('merge_runs result', r);
+ return r;
+ });
+
+ await allureStep('Then the runs SHALL NOT be merged across bookmark boundaries', () => {
+ expect(result.runsMerged).toBe(0);
+ expect(countRuns(doc)).toBe(2);
+ });
+ });
+
+ humanReadableIt.openspec('never merge across tracked-change wrapper boundaries')('Scenario: never merge across tracked-change wrapper boundaries', async () => {
+ const doc = makeDoc(
+ '' +
+ '' +
+ 'Inserted1' +
+ '' +
+ '' +
+ 'Deleted1' +
+ '' +
+ '',
+ );
+
+ const result = await allureStep('When merge_runs is called on runs in different tracked-change wrappers', async () => {
+ const r = mergeRuns(doc);
+ await allureJsonAttachment('merge_runs result', r);
+ return r;
+ });
+
+ await allureStep('Then runs in different tracked-change wrappers SHALL NOT be merged', () => {
+ expect(result.runsMerged).toBe(0);
+ });
+ });
+});
diff --git a/packages/docx-core/test-primitives/numbering.test.ts b/packages/docx-core/src/primitives/numbering.test.ts
similarity index 97%
rename from packages/docx-core/test-primitives/numbering.test.ts
rename to packages/docx-core/src/primitives/numbering.test.ts
index b479477..8f2e8dd 100644
--- a/packages/docx-core/test-primitives/numbering.test.ts
+++ b/packages/docx-core/src/primitives/numbering.test.ts
@@ -1,11 +1,11 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml } from './xml.js';
import {
computeListLabelForParagraph,
parseNumberingXml,
type NumberingCounters,
-} from '../src/primitives/numbering.js';
+} from './numbering.js';
const W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main';
diff --git a/packages/docx-core/test-primitives/prevent_double_elevation.test.ts b/packages/docx-core/src/primitives/prevent_double_elevation.test.ts
similarity index 98%
rename from packages/docx-core/test-primitives/prevent_double_elevation.test.ts
rename to packages/docx-core/src/primitives/prevent_double_elevation.test.ts
index ad0ccae..e92cf74 100644
--- a/packages/docx-core/test-primitives/prevent_double_elevation.test.ts
+++ b/packages/docx-core/src/primitives/prevent_double_elevation.test.ts
@@ -1,13 +1,13 @@
import { describe, expect } from 'vitest';
-import { OOXML } from '../src/primitives/namespaces.js';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
-import { preventDoubleElevation } from '../src/primitives/prevent_double_elevation.js';
+import { OOXML } from './namespaces.js';
+import { parseXml, serializeXml } from './xml.js';
+import { preventDoubleElevation } from './prevent_double_elevation.js';
import {
type AllureBddContext,
allureJsonAttachment,
allureStep,
testAllure,
-} from './helpers/allure-test.js';
+} from './testing/allure-test.js';
const W_NS = OOXML.W_NS;
const TEST_FEATURE = 'prevent-footnote-double-elevation';
diff --git a/packages/docx-core/test-primitives/reject_changes.test.ts b/packages/docx-core/src/primitives/reject_changes.test.ts
similarity index 94%
rename from packages/docx-core/test-primitives/reject_changes.test.ts
rename to packages/docx-core/src/primitives/reject_changes.test.ts
index 36037ae..cfa6753 100644
--- a/packages/docx-core/test-primitives/reject_changes.test.ts
+++ b/packages/docx-core/src/primitives/reject_changes.test.ts
@@ -1,9 +1,9 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML } from '../src/primitives/namespaces.js';
-import { rejectChanges } from '../src/primitives/reject_changes.js';
-import { getParagraphText } from '../src/primitives/text.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml } from './xml.js';
+import { OOXML } from './namespaces.js';
+import { rejectChanges } from './reject_changes.js';
+import { getParagraphText } from './text.js';
const W_NS = OOXML.W_NS;
diff --git a/packages/docx-core/test-primitives/relationships.test.ts b/packages/docx-core/src/primitives/relationships.test.ts
similarity index 91%
rename from packages/docx-core/test-primitives/relationships.test.ts
rename to packages/docx-core/src/primitives/relationships.test.ts
index f72acbb..5833483 100644
--- a/packages/docx-core/test-primitives/relationships.test.ts
+++ b/packages/docx-core/src/primitives/relationships.test.ts
@@ -1,7 +1,7 @@
import { describe, expect } from 'vitest';
-import { testAllure as test } from './helpers/allure-test.js';
-import { parseDocumentRels } from '../src/primitives/relationships.js';
-import { parseXml } from '../src/primitives/xml.js';
+import { testAllure as test } from './testing/allure-test.js';
+import { parseDocumentRels } from './relationships.js';
+import { parseXml } from './xml.js';
function makeRelsXml(rels: string): Document {
return parseXml(
diff --git a/packages/docx-core/test-primitives/semantic_tags.test.ts b/packages/docx-core/src/primitives/semantic_tags.test.ts
similarity index 91%
rename from packages/docx-core/test-primitives/semantic_tags.test.ts
rename to packages/docx-core/src/primitives/semantic_tags.test.ts
index 7b92b7f..5b7a615 100644
--- a/packages/docx-core/test-primitives/semantic_tags.test.ts
+++ b/packages/docx-core/src/primitives/semantic_tags.test.ts
@@ -1,10 +1,10 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
+import { itAllure as it } from './testing/allure-test.js';
import {
stripHighlightTags,
hasHighlightTags,
HIGHLIGHT_TAG,
-} from '../src/primitives/semantic_tags.js';
+} from './semantic_tags.js';
describe('stripHighlightTags', () => {
it('removes highlight tags leaving content intact', () => {
diff --git a/packages/docx-core/test-primitives/semantic_tags.traceability.test.ts b/packages/docx-core/src/primitives/semantic_tags.traceability.test.ts
similarity index 92%
rename from packages/docx-core/test-primitives/semantic_tags.traceability.test.ts
rename to packages/docx-core/src/primitives/semantic_tags.traceability.test.ts
index a76315a..ae77674 100644
--- a/packages/docx-core/test-primitives/semantic_tags.traceability.test.ts
+++ b/packages/docx-core/src/primitives/semantic_tags.traceability.test.ts
@@ -1,8 +1,8 @@
import { describe, expect } from 'vitest';
import {
stripHighlightTags,
-} from '../src/primitives/semantic_tags.js';
-import { itAllure, allureStep } from './helpers/allure-test.js';
+} from './semantic_tags.js';
+import { itAllure, allureStep } from './testing/allure-test.js';
const TEST_FEATURE = 'docx-primitives';
diff --git a/packages/docx-core/src/primitives/semantic_tags.unit.test.ts b/packages/docx-core/src/primitives/semantic_tags.unit.test.ts
index 9e13d9c..4b537d8 100644
--- a/packages/docx-core/src/primitives/semantic_tags.unit.test.ts
+++ b/packages/docx-core/src/primitives/semantic_tags.unit.test.ts
@@ -1,5 +1,5 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from '../testing/allure-test.js';
+import { itAllure as it } from './testing/allure-test.js';
import {
hasFormattingTags,
stripFormattingTags,
diff --git a/packages/docx-core/test-primitives/simplify_redlines.test.ts b/packages/docx-core/src/primitives/simplify_redlines.test.ts
similarity index 62%
rename from packages/docx-core/test-primitives/simplify_redlines.test.ts
rename to packages/docx-core/src/primitives/simplify_redlines.test.ts
index 505c26c..dcc44c2 100644
--- a/packages/docx-core/test-primitives/simplify_redlines.test.ts
+++ b/packages/docx-core/src/primitives/simplify_redlines.test.ts
@@ -1,8 +1,10 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { simplifyRedlines } from '../src/primitives/simplify_redlines.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
+
+const it = itAllure;
+import { parseXml } from './xml.js';
+import { OOXML, W } from './namespaces.js';
+import { simplifyRedlines } from './simplify_redlines.js';
const W_NS = OOXML.W_NS;
@@ -223,3 +225,98 @@ describe('simplify_redlines', () => {
});
});
});
+
+const TEST_FEATURE = 'add-auto-normalization-on-open';
+
+const humanReadableIt = itAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE }).allure({
+ tags: ['human-readable'],
+ parameters: { audience: 'non-technical' },
+});
+
+describe('Traceability: Auto-Normalization on Open — Redline Simplification', () => {
+ humanReadableIt.openspec('merge adjacent same-author same-type tracked-change wrappers')('Scenario: merge adjacent same-author same-type tracked-change wrappers', async () => {
+ const doc = makeDoc(
+ '' +
+ '' +
+ 'Hello ' +
+ '' +
+ '' +
+ 'World' +
+ '' +
+ '',
+ );
+
+ const result = await allureStep('When simplify_redlines is called on adjacent same-author w:ins wrappers', async () => {
+ const r = simplifyRedlines(doc);
+ await allureJsonAttachment('simplify_redlines result', r);
+ return r;
+ });
+
+ await allureStep('Then the adjacent wrappers SHALL be consolidated into a single wrapper', () => {
+ expect(result.wrappersConsolidated).toBeGreaterThanOrEqual(1);
+ expect(countWrappers(doc, 'ins')).toBe(1);
+ });
+
+ await allureStep('And the merged wrapper SHALL preserve all child content', () => {
+ expect(bodyText(doc)).toBe('Hello World');
+ });
+ });
+
+ humanReadableIt.openspec('never merge wrappers from different authors')('Scenario: never merge wrappers from different authors', async () => {
+ const doc = makeDoc(
+ '' +
+ '' +
+ 'Alice text' +
+ '' +
+ '' +
+ 'Bob text' +
+ '' +
+ '',
+ );
+
+ const result = await allureStep('When simplify_redlines is called on adjacent different-author wrappers', async () => {
+ const r = simplifyRedlines(doc);
+ await allureJsonAttachment('simplify_redlines result', r);
+ return r;
+ });
+
+ await allureStep('Then the wrappers SHALL NOT be merged', () => {
+ expect(result.wrappersConsolidated).toBe(0);
+ expect(countWrappers(doc, 'ins')).toBe(2);
+ });
+
+ await allureStep('And author attribution SHALL be preserved', () => {
+ const wrappers = doc.getElementsByTagNameNS(W_NS, 'ins');
+ const authors = new Set();
+ for (let i = 0; i < wrappers.length; i++) {
+ authors.add(wrappers.item(i)!.getAttribute('w:author') ?? '');
+ }
+ expect(authors).toEqual(new Set(['Alice', 'Bob']));
+ });
+ });
+
+ humanReadableIt.openspec('never merge across different change types')('Scenario: never merge across different change types', async () => {
+ const doc = makeDoc(
+ '' +
+ '' +
+ 'New' +
+ '' +
+ '' +
+ 'Old' +
+ '' +
+ '',
+ );
+
+ const result = await allureStep('When simplify_redlines is called on adjacent w:ins + w:del from same author', async () => {
+ const r = simplifyRedlines(doc);
+ await allureJsonAttachment('simplify_redlines result', r);
+ return r;
+ });
+
+ await allureStep('Then the wrappers SHALL NOT be merged', () => {
+ expect(result.wrappersConsolidated).toBe(0);
+ expect(countWrappers(doc, 'ins')).toBe(1);
+ expect(countWrappers(doc, 'del')).toBe(1);
+ });
+ });
+});
diff --git a/packages/docx-core/test-primitives/helpers/allure-test.ts b/packages/docx-core/src/primitives/testing/allure-test.ts
similarity index 91%
rename from packages/docx-core/test-primitives/helpers/allure-test.ts
rename to packages/docx-core/src/primitives/testing/allure-test.ts
index 6ff151a..66f2af2 100644
--- a/packages/docx-core/test-primitives/helpers/allure-test.ts
+++ b/packages/docx-core/src/primitives/testing/allure-test.ts
@@ -3,7 +3,7 @@ import {
type AllureBddContext as SharedAllureBddContext,
type AllureRuntime as SharedAllureRuntime,
type AllureStepContext as SharedAllureStepContext,
-} from '../../../../testing/allure-test-factory.js';
+} from '../../../../../testing/allure-test-factory.js';
type EpicName =
| 'DOCX Primitives';
diff --git a/packages/docx-core/src/primitives/text.test.ts b/packages/docx-core/src/primitives/text.test.ts
index 729e842..d1eab0f 100644
--- a/packages/docx-core/src/primitives/text.test.ts
+++ b/packages/docx-core/src/primitives/text.test.ts
@@ -1,5 +1,5 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from '../testing/allure-test.js';
+import { itAllure as it } from './testing/allure-test.js';
import { parseXml } from './xml.js';
import { OOXML, W } from './namespaces.js';
import { SafeDocxError } from './errors.js';
@@ -411,6 +411,24 @@ describe('replaceParagraphTextRange', () => {
expect(getParagraphText(p)).toBe('ABCXYZ');
});
+
+ it('throws UNSAFE_CONTAINER_BOUNDARY when replacement spans different run containers', () => {
+ const doc = makeDoc(
+ `` +
+ `Link` +
+ `Tail` +
+ ``,
+ );
+ const p = firstParagraph(doc);
+
+ expect(() => replaceParagraphTextRange(p, 2, 6, 'Changed')).toThrowError(SafeDocxError);
+ try {
+ replaceParagraphTextRange(p, 2, 6, 'Changed');
+ } catch (e: unknown) {
+ if (!(e instanceof SafeDocxError)) throw e;
+ expect(e.code).toBe('UNSAFE_CONTAINER_BOUNDARY');
+ }
+ });
});
// ── findOffsetInRuns (indirect via replaceParagraphTextRange) ────────
diff --git a/packages/docx-core/test-primitives/validate_document.test.ts b/packages/docx-core/src/primitives/validate_document.test.ts
similarity index 92%
rename from packages/docx-core/test-primitives/validate_document.test.ts
rename to packages/docx-core/src/primitives/validate_document.test.ts
index ce365dc..f6ec59c 100644
--- a/packages/docx-core/test-primitives/validate_document.test.ts
+++ b/packages/docx-core/src/primitives/validate_document.test.ts
@@ -1,8 +1,8 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { validateDocument } from '../src/primitives/validate_document.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml } from './xml.js';
+import { OOXML } from './namespaces.js';
+import { validateDocument } from './validate_document.js';
const W_NS = OOXML.W_NS;
@@ -61,8 +61,8 @@ describe('validate_document', () => {
expect(result.isValid).toBe(false);
expect(result.warnings).toHaveLength(1);
- expect(result.warnings[0].code).toBe('ORPHANED_BOOKMARK_START');
- expect(result.warnings[0].message).toContain('orphan_start');
+ expect(result.warnings[0]!.code).toBe('ORPHANED_BOOKMARK_START');
+ expect(result.warnings[0]!.message).toContain('orphan_start');
});
it('detects bookmarkEnd without matching bookmarkStart', () => {
@@ -77,7 +77,7 @@ describe('validate_document', () => {
expect(result.isValid).toBe(false);
expect(result.warnings).toHaveLength(1);
- expect(result.warnings[0].code).toBe('ORPHANED_BOOKMARK_END');
+ expect(result.warnings[0]!.code).toBe('ORPHANED_BOOKMARK_END');
});
it('does not flag matched bookmark pairs', () => {
@@ -111,7 +111,7 @@ describe('validate_document', () => {
expect(result.isValid).toBe(false);
const malformed = result.warnings.filter(w => w.code === 'MALFORMED_TRACKED_CHANGE');
expect(malformed.length).toBeGreaterThanOrEqual(1);
- expect(malformed[0].message).toContain('w:author');
+ expect(malformed[0]!.message).toContain('w:author');
});
it('detects w:del missing w:date attribute', () => {
@@ -128,7 +128,7 @@ describe('validate_document', () => {
expect(result.isValid).toBe(false);
const malformed = result.warnings.filter(w => w.code === 'MALFORMED_TRACKED_CHANGE');
expect(malformed.length).toBeGreaterThanOrEqual(1);
- expect(malformed[0].message).toContain('w:date');
+ expect(malformed[0]!.message).toContain('w:date');
});
it('detects empty tracked-change wrapper', () => {
diff --git a/packages/docx-core/test-primitives/xml.test.ts b/packages/docx-core/src/primitives/xml.test.ts
similarity index 93%
rename from packages/docx-core/test-primitives/xml.test.ts
rename to packages/docx-core/src/primitives/xml.test.ts
index a900dce..f84253c 100644
--- a/packages/docx-core/test-primitives/xml.test.ts
+++ b/packages/docx-core/src/primitives/xml.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml, serializeXml, textContent } from '../src/primitives/xml.js';
+import { itAllure as it } from './testing/allure-test.js';
+import { parseXml, serializeXml, textContent } from './xml.js';
describe('parseXml', () => {
it('parses valid XML and returns a Document', () => {
diff --git a/packages/docx-core/test-primitives/xml.traceability.test.ts b/packages/docx-core/src/primitives/xml.traceability.test.ts
similarity index 95%
rename from packages/docx-core/test-primitives/xml.traceability.test.ts
rename to packages/docx-core/src/primitives/xml.traceability.test.ts
index 7ff24f1..472aaee 100644
--- a/packages/docx-core/test-primitives/xml.traceability.test.ts
+++ b/packages/docx-core/src/primitives/xml.traceability.test.ts
@@ -1,6 +1,6 @@
import { describe, expect } from 'vitest';
-import { parseXml, serializeXml, textContent } from '../src/primitives/xml.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
+import { parseXml, serializeXml, textContent } from './xml.js';
+import { itAllure, allureStep, allureJsonAttachment } from './testing/allure-test.js';
const TEST_FEATURE = 'docx-primitives';
diff --git a/packages/docx-core/test-primitives/DOCX_PRIMITIVES_OPENSPEC_TRACEABILITY.md b/packages/docx-core/test-primitives/DOCX_PRIMITIVES_OPENSPEC_TRACEABILITY.md
deleted file mode 100644
index ba70e7b..0000000
--- a/packages/docx-core/test-primitives/DOCX_PRIMITIVES_OPENSPEC_TRACEABILITY.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# DOCX Primitives TS OpenSpec Traceability Matrix
-
-> Auto-generated by `packages/docx-primitives/scripts/validate_openspec_coverage.mjs`.
-> Do not hand-edit this file.
-
-## Canonical Spec Coverage
-
-| Scenario | Status | Allure Test Files | Notes |
-|---|---|---|---|
-| accept deletions by removing w:del elements and content | covered | `src/accept_changes.test.ts` | |
-| accept insertions by unwrapping w:ins wrappers | covered | `src/accept_changes.test.ts` | |
-| accept moves by keeping destination and removing source | covered | `src/accept_changes.test.ts` | |
-| accept property changes by removing change records | covered | `src/accept_changes.test.ts` | |
-| bottom-up processing resolves nested revisions | covered | `src/accept_changes.test.ts` | |
-| emit definition tags for quoted term before definition verb | covered | `src/semantic_tags.traceability.test.ts` | |
-| emit definition tags for smart/curly quotes | covered | `src/semantic_tags.traceability.test.ts` | |
-| exact match found for literal substring | covered | `src/matching.traceability.test.ts` | |
-| exact mode preferred over quote_normalized when both match | covered | `src/matching.traceability.test.ts` | |
-| extract article labels with roman numeral support | covered | `src/list_labels.traceability.test.ts` | |
-| extract multi-char roman numeral labels | covered | `src/list_labels.traceability.test.ts` | |
-| extract numbered heading labels | covered | `src/list_labels.traceability.test.ts` | |
-| extract parenthesized letter labels | covered | `src/list_labels.traceability.test.ts` | |
-| extract section labels with sub-paragraph support | covered | `src/list_labels.traceability.test.ts` | |
-| flexible_whitespace matches across spacing variance | covered | `src/matching.traceability.test.ts` | |
-| getParagraphBookmarkId retrieves minted ID | covered | `src/bookmarks.test.ts` | |
-| insertParagraphBookmarks mints IDs matching expected pattern | covered | `src/bookmarks.test.ts` | |
-| merge adjacent runs with equivalent formatting | covered | `src/normalization.test.ts` | |
-| merge adjacent same-author same-type tracked-change wrappers | covered | `src/normalization.test.ts` | |
-| multiple when needle appears more than once | covered | `src/matching.traceability.test.ts` | |
-| namespaced XML preserved through round-trip | covered | `src/xml.traceability.test.ts` | |
-| never merge across bookmark boundaries | covered | `src/normalization.test.ts` | |
-| never merge across comment range boundaries | covered | `src/normalization.test.ts` | |
-| never merge across different change types | covered | `src/normalization.test.ts` | |
-| never merge across field boundaries | covered | `src/normalization.test.ts` | |
-| never merge across tracked-change wrapper boundaries | covered | `src/normalization.test.ts` | |
-| never merge wrappers from different authors | covered | `src/normalization.test.ts` | |
-| no tags emitted for text without definitions | covered | `src/semantic_tags.traceability.test.ts` | |
-| not_found for empty needle | covered | `src/matching.traceability.test.ts` | |
-| not_found when needle is absent | covered | `src/matching.traceability.test.ts` | |
-| null label for plain text without list patterns | covered | `src/list_labels.traceability.test.ts` | |
-| orphaned moves handled with safe fallback | covered | `src/accept_changes.test.ts` | |
-| parse and serialize preserves element structure | covered | `src/xml.traceability.test.ts` | |
-| quote_normalized matches curly quotes against straight quotes | covered | `src/matching.traceability.test.ts` | |
-| quote_optional matches quoted and unquoted term references | covered | `src/matching.traceability.test.ts` | |
-| setParagraphSpacing creates missing pPr and spacing containers | covered | `src/layout.traceability.test.ts` | |
-| setParagraphSpacing preserves unrelated formatting nodes | covered | `src/layout.traceability.test.ts` | |
-| setTableCellPadding creates tcPr and tcMar containers | covered | `src/layout.traceability.test.ts` | |
-| setTableRowHeight reports missing indexes | covered | `src/layout.traceability.test.ts` | |
-| single-char roman-like letters classified as LETTER not ROMAN | covered | `src/list_labels.traceability.test.ts` | |
-| strip definition tags replaces with quotes | covered | `src/semantic_tags.traceability.test.ts` | |
-| strip highlight tags leaves content intact | covered | `src/semantic_tags.traceability.test.ts` | |
-| stripListLabel removes label and leading whitespace | covered | `src/list_labels.traceability.test.ts` | |
-| textContent returns concatenated text of nested elements | covered | `src/xml.traceability.test.ts` | |
-| textContent returns empty string for null or undefined input | covered | `src/xml.traceability.test.ts` | |
-
-### Excluded (tested in docx-comparison)
-
-- Accept-all and reject-all preserve semantic read-text parity
-- Created inplace paragraphs retain bookmark boundary markers
-- Inplace downgrades when semantic bookmark parity fails
-- Inplace remains valid when bookmark IDs are remapped but semantics are preserved
-- Structural diagnostics remain equivalent across round-trip projections
-
-## Change Delta Coverage
-
-### Change: `add-accept-tracked-changes`
-
-| Scenario | Status | Allure Test Files | Notes |
-|---|---|---|---|
-| accept deletions by removing w:del elements and content | covered | `src/accept_changes.test.ts` | |
-| accept insertions by unwrapping w:ins wrappers | covered | `src/accept_changes.test.ts` | |
-| accept moves by keeping destination and removing source | covered | `src/accept_changes.test.ts` | |
-| accept property changes by removing change records | covered | `src/accept_changes.test.ts` | |
-| bottom-up processing resolves nested revisions | covered | `src/accept_changes.test.ts` | |
-| orphaned moves handled with safe fallback | covered | `src/accept_changes.test.ts` | |
-
-### Change: `add-auto-normalization-on-open`
-
-| Scenario | Status | Allure Test Files | Notes |
-|---|---|---|---|
-| merge adjacent runs with equivalent formatting | covered | `src/normalization.test.ts` | |
-| merge adjacent same-author same-type tracked-change wrappers | covered | `src/normalization.test.ts` | |
-| never merge across bookmark boundaries | covered | `src/normalization.test.ts` | |
-| never merge across comment range boundaries | covered | `src/normalization.test.ts` | |
-| never merge across different change types | covered | `src/normalization.test.ts` | |
-| never merge across field boundaries | covered | `src/normalization.test.ts` | |
-| never merge across tracked-change wrapper boundaries | covered | `src/normalization.test.ts` | |
-| never merge wrappers from different authors | covered | `src/normalization.test.ts` | |
-
-### Change: `add-footnote-support`
-
-| Scenario | Status | Allure Test Files | Notes |
-|---|---|---|---|
-| ID allocation skips reserved entries by type | covered | `src/footnotes.test.ts` | |
-| add footnote after specific text with mid-run split | covered | `src/footnotes.test.ts` | |
-| add footnote at end of paragraph | covered | `src/footnotes.test.ts` | |
-| anchored paragraph IDs resolved | covered | `src/footnotes.test.ts` | |
-| bootstrap creates footnotes.xml when missing | covered | `src/footnotes.test.ts` | |
-| bootstrap is idempotent | covered | `src/footnotes.test.ts` | |
-| bootstrap preserves existing reserved entries | covered | `src/footnotes.test.ts` | |
-| delete refuses reserved type entries | covered | `src/footnotes.test.ts` | |
-| delete removes dedicated reference run | covered | `src/footnotes.test.ts` | |
-| delete removes footnoteReference from mixed run without losing text | covered | `src/footnotes.test.ts` | |
-| display numbers follow document order | covered | `src/footnotes.test.ts` | |
-| footnote body includes Word-compatible skeleton | covered | `src/footnotes.test.ts` | |
-| mixed-run references handled | covered | `src/footnotes.test.ts` | |
-| read footnotes from document with multiple footnotes | covered | `src/footnotes.test.ts` | |
-| read from empty document returns empty array | covered | `src/footnotes.test.ts` | |
-| round-trip preserves footnotes | covered | `src/footnotes.test.ts` | |
-| update changes text content | covered | `src/footnotes.test.ts` | |
-| update preserves other footnotes | covered | `src/footnotes.test.ts` | |
-
-### Change: `add-run-level-formatting-visibility`
-
-| Scenario | Status | Allure Test Files | Notes |
-|---|---|---|---|
-| char-weighted modal baseline selects dominant formatting tuple | covered | `src/document_view_formatting.test.ts` | |
-| detect hyperlink runs and extract href | covered | `src/document_view_formatting.test.ts` | |
-| extract bold, italic, underline, highlighting tuple per run | covered | `src/document_view_formatting.test.ts` | |
-| suppression disabled when baseline coverage below 60% | covered | `src/document_view_formatting.test.ts` | |
-| tags nested in consistent order | covered | `src/document_view_formatting.test.ts` | |
-| tie-break by earliest run when modal weights are equal | covered | `src/document_view_formatting.test.ts` | |
-
-
diff --git a/packages/docx-core/test-primitives/comments.test.ts b/packages/docx-core/test-primitives/comments.test.ts
deleted file mode 100644
index 21ead76..0000000
--- a/packages/docx-core/test-primitives/comments.test.ts
+++ /dev/null
@@ -1,641 +0,0 @@
-import { describe, expect } from 'vitest';
-import { itAllure as it, allureStep } from './helpers/allure-test.js';
-import JSZip from 'jszip';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { DocxZip } from '../src/primitives/zip.js';
-import { DocxDocument } from '../src/primitives/document.js';
-import { bootstrapCommentParts, addComment, addCommentReply, getComments, getComment } from '../src/primitives/comments.js';
-
-const W_NS = OOXML.W_NS;
-const W15_NS = OOXML.W15_NS;
-
-function makeDocXml(bodyXml: string): string {
- return (
- `` +
- `` +
- `${bodyXml}` +
- ``
- );
-}
-
-async function makeDocxBuffer(bodyXml: string, extraFiles?: Record): Promise {
- const zip = new JSZip();
- zip.file('word/document.xml', makeDocXml(bodyXml));
- if (extraFiles) {
- for (const [name, text] of Object.entries(extraFiles)) zip.file(name, text);
- }
- return (await zip.generateAsync({ type: 'nodebuffer' })) as Buffer;
-}
-
-async function loadZip(buffer: Buffer): Promise {
- return DocxZip.load(buffer);
-}
-
-describe('comments', () => {
- describe('bootstrapCommentParts', () => {
- it('creates all required comment parts when none exist', async () => {
- const buf = await makeDocxBuffer('Hello');
- const zip = await loadZip(buf);
-
- expect(zip.hasFile('word/comments.xml')).toBe(false);
- expect(zip.hasFile('word/commentsExtended.xml')).toBe(false);
- expect(zip.hasFile('word/people.xml')).toBe(false);
-
- const result = await bootstrapCommentParts(zip);
-
- expect(result.partsCreated).toContain('word/comments.xml');
- expect(result.partsCreated).toContain('word/commentsExtended.xml');
- expect(result.partsCreated).toContain('word/people.xml');
- expect(zip.hasFile('word/comments.xml')).toBe(true);
- expect(zip.hasFile('word/commentsExtended.xml')).toBe(true);
- expect(zip.hasFile('word/people.xml')).toBe(true);
- });
-
- it('is idempotent — does not duplicate parts on second call', async () => {
- const buf = await makeDocxBuffer('Hello');
- const zip = await loadZip(buf);
-
- const first = await bootstrapCommentParts(zip);
- expect(first.partsCreated.length).toBe(3);
-
- const second = await bootstrapCommentParts(zip);
- expect(second.partsCreated.length).toBe(0);
- });
-
- it('adds correct Content-Type overrides', async () => {
- const buf = await makeDocxBuffer('Hello');
- const zip = await loadZip(buf);
-
- await bootstrapCommentParts(zip);
-
- const ctXml = await zip.readText('[Content_Types].xml');
- expect(ctXml).toContain('word/comments.xml');
- expect(ctXml).toContain('word/commentsExtended.xml');
- expect(ctXml).toContain('word/people.xml');
- });
-
- it('adds correct relationship entries', async () => {
- const buf = await makeDocxBuffer('Hello');
- const zip = await loadZip(buf);
-
- await bootstrapCommentParts(zip);
-
- const relsXml = await zip.readText('word/_rels/document.xml.rels');
- expect(relsXml).toContain('comments.xml');
- expect(relsXml).toContain('commentsExtended.xml');
- expect(relsXml).toContain('people.xml');
- });
- });
-
- describe('addComment', () => {
- it('inserts commentRangeStart/commentRangeEnd markers', async () => {
- const bodyXml = 'Hello World';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Test Author',
- text: 'A comment',
- });
-
- const serialized = serializeXml(doc);
- expect(serialized).toContain('commentRangeStart');
- expect(serialized).toContain('commentRangeEnd');
- });
-
- it('inserts commentReference run after range end', async () => {
- const bodyXml = 'Hello World';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Test Author',
- text: 'A comment',
- });
-
- const serialized = serializeXml(doc);
- expect(serialized).toContain('commentReference');
- });
-
- it('allocates sequential comment IDs', async () => {
- const bodyXml = 'Hello World Foo';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const r1 = await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Author A',
- text: 'First',
- });
- const r2 = await addComment(doc, zip, {
- paragraphEl: p,
- start: 6,
- end: 11,
- author: 'Author B',
- text: 'Second',
- });
-
- expect(r1.commentId).toBe(0);
- expect(r2.commentId).toBe(1);
- });
-
- it('comment body includes annotationRef element', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Test',
- text: 'Note',
- });
-
- const commentsXml = await zip.readText('word/comments.xml');
- expect(commentsXml).toContain('annotationRef');
- expect(commentsXml).toContain('Note');
- });
-
- it('adds author to people.xml', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Jane Doe',
- text: 'Hi',
- });
-
- const peopleXml = await zip.readText('word/people.xml');
- expect(peopleXml).toContain('Jane Doe');
- });
- });
-
- describe('addCommentReply', () => {
- it('links reply to parent via commentsExtended.xml paraIdParent', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- // First add a root comment
- const root = await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root',
- });
-
- // Then add a reply
- const reply = await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Replier',
- text: 'Reply text',
- });
-
- expect(reply.parentCommentId).toBe(root.commentId);
-
- const extXml = await zip.readText('word/commentsExtended.xml');
- expect(extXml).toContain('paraIdParent');
- });
-
- it('reply has no range markers in document body', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const root = await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root',
- });
-
- // Count range markers before reply
- const beforeXml = serializeXml(doc);
- const beforeStartCount = (beforeXml.match(/commentRangeStart/g) ?? []).length;
- const beforeEndCount = (beforeXml.match(/commentRangeEnd/g) ?? []).length;
-
- await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Replier',
- text: 'Reply',
- });
-
- // No new range markers added
- const afterXml = serializeXml(doc);
- const afterStartCount = (afterXml.match(/commentRangeStart/g) ?? []).length;
- const afterEndCount = (afterXml.match(/commentRangeEnd/g) ?? []).length;
- expect(afterStartCount).toBe(beforeStartCount);
- expect(afterEndCount).toBe(beforeEndCount);
- });
-
- it('supports multiple replies to same parent', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
-
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const root = await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root',
- });
-
- const r1 = await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Reply1',
- text: 'First reply',
- });
-
- const r2 = await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Reply2',
- text: 'Second reply',
- });
-
- expect(r1.commentId).not.toBe(r2.commentId);
- expect(r1.parentCommentId).toBe(root.commentId);
- expect(r2.parentCommentId).toBe(root.commentId);
-
- const commentsXml = await zip.readText('word/comments.xml');
- expect(commentsXml).toContain('First reply');
- expect(commentsXml).toContain('Second reply');
- });
- });
-
- describe('round-trip', () => {
- it('comment survives toBuffer → reload cycle', async () => {
- const bodyXml = 'Hello World';
- const buf = await makeDocxBuffer(bodyXml);
- const doc = await DocxDocument.load(buf);
- doc.insertParagraphBookmarks('test_attachment');
-
- // Use readParagraphs to get the bookmark IDs
- const { paragraphs } = doc.readParagraphs();
- expect(paragraphs.length).toBeGreaterThan(0);
- const paraId = paragraphs[0]!.id;
- expect(paraId).toBeTruthy();
-
- await doc.addComment({
- paragraphId: paraId,
- start: 0,
- end: 5,
- author: 'Round Trip',
- text: 'Survives reload',
- });
-
- const { buffer } = await doc.toBuffer();
-
- // Reload and verify
- const reloadedZip = await DocxZip.load(buffer);
- const commentsXml = await reloadedZip.readText('word/comments.xml');
- expect(commentsXml).toContain('Survives reload');
- expect(commentsXml).toContain('Round Trip');
- });
- });
-
- describe('getComments', () => {
- it('returns empty array when no comments.xml exists', async () => {
-
- let comments: Awaited>;
-
- await allureStep('Given a document with no comment parts', async () => {
- const buf = await makeDocxBuffer('Hello');
- const zip = await loadZip(buf);
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- comments = await getComments(zip, doc);
- });
-
- await allureStep('Then getComments returns an empty array', async () => {
- expect(comments).toEqual([]);
- });
- });
-
- it('reads comments written by addComment', async () => {
-
- let zip: DocxZip;
- let doc: Document;
- let comments: Awaited>;
-
- await allureStep('Given a document with a comment added via addComment', async () => {
- const bodyXml = 'Hello World';
- const buf = await makeDocxBuffer(bodyXml);
- zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
- const docXml = await zip.readText('word/document.xml');
- doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
- await addComment(doc, zip, {
- paragraphEl: p,
- start: 0,
- end: 5,
- author: 'Alice',
- text: 'Nice intro',
- initials: 'A',
- });
- });
-
- await allureStep('When reading comments via getComments', async () => {
- comments = await getComments(zip, doc);
- });
-
- await allureStep('Then exactly one comment is returned', async () => {
- expect(comments).toHaveLength(1);
- });
-
- await allureStep('And comment ID is 0', async () => {
- expect(comments[0]!.id).toBe(0);
- });
-
- await allureStep('And author is Alice', async () => {
- expect(comments[0]!.author).toBe('Alice');
- });
-
- await allureStep('And text is "Nice intro"', async () => {
- expect(comments[0]!.text).toBe('Nice intro');
- });
-
- await allureStep('And initials is "A"', async () => {
- expect(comments[0]!.initials).toBe('A');
- });
-
- await allureStep('And date is populated', async () => {
- expect(comments[0]!.date).toBeTruthy();
- });
-
- await allureStep('And paragraphId is populated', async () => {
- expect(comments[0]!.paragraphId).toBeTruthy();
- });
-
- await allureStep('And replies array is empty', async () => {
- expect(comments[0]!.replies).toEqual([]);
- });
- });
-
- it('reads multiple comments', async () => {
-
- let zip: DocxZip;
- let doc: Document;
- let comments: Awaited>;
-
- await allureStep('Given a document with two comments on different ranges', async () => {
- const bodyXml = 'Hello World Foo';
- const buf = await makeDocxBuffer(bodyXml);
- zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
- const docXml = await zip.readText('word/document.xml');
- doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
- await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'Alice', text: 'First comment' });
- await addComment(doc, zip, { paragraphEl: p, start: 6, end: 11, author: 'Bob', text: 'Second comment' });
- });
-
- await allureStep('When reading comments via getComments', async () => {
- comments = await getComments(zip, doc);
- });
-
- await allureStep('Then two comments are returned', async () => {
- expect(comments).toHaveLength(2);
- });
-
- await allureStep('And first comment text is "First comment"', async () => {
- expect(comments[0]!.text).toBe('First comment');
- });
-
- await allureStep('And second comment text is "Second comment"', async () => {
- expect(comments[1]!.text).toBe('Second comment');
- });
- });
-
- it('builds threaded replies from addCommentReply', async () => {
-
- let zip: DocxZip;
- let doc: Document;
- let comments: Awaited>;
-
- await allureStep('Given a root comment with two replies', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
- const docXml = await zip.readText('word/document.xml');
- doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
- const root = await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'Author', text: 'Root comment' });
- await addCommentReply(doc, zip, { parentCommentId: root.commentId, author: 'Replier', text: 'Reply one' });
- await addCommentReply(doc, zip, { parentCommentId: root.commentId, author: 'Replier2', text: 'Reply two' });
- });
-
- await allureStep('When reading comments via getComments', async () => {
- comments = await getComments(zip, doc);
- });
-
- await allureStep('Then only one root comment is returned at top level', async () => {
- expect(comments).toHaveLength(1);
- });
-
- await allureStep('And root comment text is "Root comment"', async () => {
- expect(comments[0]!.text).toBe('Root comment');
- });
-
- await allureStep('And root comment has two replies', async () => {
- expect(comments[0]!.replies).toHaveLength(2);
- });
-
- await allureStep('And first reply text is "Reply one" by "Replier"', async () => {
- expect(comments[0]!.replies[0]!.text).toBe('Reply one');
- expect(comments[0]!.replies[0]!.author).toBe('Replier');
- });
-
- await allureStep('And second reply text is "Reply two"', async () => {
- expect(comments[0]!.replies[1]!.text).toBe('Reply two');
- });
- });
-
- it('round-trip: write comments, save, reload, read back', async () => {
-
- let buffer: Buffer;
- let comments: Awaited>;
-
- await allureStep('Given a document with a comment and a reply', async () => {
- const bodyXml = 'Hello World';
- const buf = await makeDocxBuffer(bodyXml);
- const doc = await DocxDocument.load(buf);
- doc.insertParagraphBookmarks('test_attachment');
- const { paragraphs } = doc.readParagraphs();
- const paraId = paragraphs[0]!.id;
- await doc.addComment({ paragraphId: paraId, start: 0, end: 5, author: 'RoundTrip Author', text: 'Round trip comment' });
- const replyResult = await doc.addCommentReply({ parentCommentId: 0, author: 'Reply Author', text: 'Round trip reply' });
- expect(replyResult.parentCommentId).toBe(0);
- ({ buffer } = await doc.toBuffer());
- });
-
- await allureStep('When reloading from buffer and reading comments', async () => {
- const reloaded = await DocxDocument.load(buffer);
- comments = await reloaded.getComments();
- });
-
- await allureStep('Then one root comment is returned', async () => {
- expect(comments).toHaveLength(1);
- });
-
- await allureStep('And root comment text matches "Round trip comment"', async () => {
- expect(comments[0]!.text).toBe('Round trip comment');
- });
-
- await allureStep('And root comment author matches "RoundTrip Author"', async () => {
- expect(comments[0]!.author).toBe('RoundTrip Author');
- });
-
- await allureStep('And reply is preserved with correct text', async () => {
- expect(comments[0]!.replies).toHaveLength(1);
- expect(comments[0]!.replies[0]!.text).toBe('Round trip reply');
- });
- });
- });
-
- describe('getComment', () => {
- it('finds a root comment by ID', async () => {
-
- let zip: DocxZip;
- let doc: Document;
- let found: Awaited>;
-
- await allureStep('Given a document with one comment (ID 0)', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
- const docXml = await zip.readText('word/document.xml');
- doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
- await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'FindMe', text: 'Target comment' });
- });
-
- await allureStep('When looking up comment by ID 0', async () => {
- found = await getComment(zip, doc, 0);
- });
-
- await allureStep('Then the comment is found', async () => {
- expect(found).not.toBeNull();
- });
-
- await allureStep('And text is "Target comment"', async () => {
- expect(found!.text).toBe('Target comment');
- });
-
- await allureStep('And author is "FindMe"', async () => {
- expect(found!.author).toBe('FindMe');
- });
- });
-
- it('finds a reply comment by ID', async () => {
-
- let zip: DocxZip;
- let doc: Document;
- let replyId: number;
- let found: Awaited>;
-
- await allureStep('Given a root comment with a reply', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
- const docXml = await zip.readText('word/document.xml');
- doc = parseXml(docXml);
- const p = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
- const root = await addComment(doc, zip, { paragraphEl: p, start: 0, end: 5, author: 'Root', text: 'Root' });
- const reply = await addCommentReply(doc, zip, { parentCommentId: root.commentId, author: 'Reply', text: 'Nested reply' });
- replyId = reply.commentId;
- });
-
- await allureStep('When looking up the reply by its ID', async () => {
- found = await getComment(zip, doc, replyId);
- });
-
- await allureStep('Then the reply is found', async () => {
- expect(found).not.toBeNull();
- });
-
- await allureStep('And text is "Nested reply"', async () => {
- expect(found!.text).toBe('Nested reply');
- });
- });
-
- it('returns null for non-existent ID', async () => {
-
- let found: Awaited>;
-
- await allureStep('Given a document with no comments', async () => {
- const bodyXml = 'Hello';
- const buf = await makeDocxBuffer(bodyXml);
- const zip = await loadZip(buf);
- await bootstrapCommentParts(zip);
- const docXml = await zip.readText('word/document.xml');
- const doc = parseXml(docXml);
- found = await getComment(zip, doc, 999);
- });
-
- await allureStep('Then getComment returns null', async () => {
- expect(found).toBeNull();
- });
- });
- });
-});
diff --git a/packages/docx-core/test-primitives/delete_comment.test.ts b/packages/docx-core/test-primitives/delete_comment.test.ts
deleted file mode 100644
index 400a49f..0000000
--- a/packages/docx-core/test-primitives/delete_comment.test.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-import { describe, expect } from 'vitest';
-import JSZip from 'jszip';
-import { itAllure } from './helpers/allure-test.js';
-import { parseXml, serializeXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { DocxZip } from '../src/primitives/zip.js';
-import {
- addComment,
- addCommentReply,
- bootstrapCommentParts,
- deleteComment,
- getComments,
-} from '../src/primitives/comments.js';
-
-const TEST_FEATURE = 'add-comment-delete-tool';
-const it = itAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
-const W_NS = OOXML.W_NS;
-
-function makeDocXml(bodyXml: string): string {
- return (
- `` +
- `` +
- `${bodyXml}` +
- ``
- );
-}
-
-async function makeDocxBuffer(bodyXml: string): Promise {
- const zip = new JSZip();
- zip.file('word/document.xml', makeDocXml(bodyXml));
- return (await zip.generateAsync({ type: 'nodebuffer' })) as Buffer;
-}
-
-async function loadZip(buffer: Buffer): Promise {
- return DocxZip.load(buffer);
-}
-
-describe('deleteComment OpenSpec traceability', () => {
- it
- .openspec('delete root comment with no replies')
- ('Scenario: delete root comment with no replies', async () => {
- const zip = await loadZip(await makeDocxBuffer('Hello world'));
- await bootstrapCommentParts(zip);
-
- const doc = parseXml(await zip.readText('word/document.xml'));
- const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const root = await addComment(doc, zip, {
- paragraphEl: paragraph,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root comment',
- });
-
- await deleteComment(doc, zip, { commentId: root.commentId });
-
- const comments = await getComments(zip, doc);
- expect(comments).toEqual([]);
-
- const serialized = serializeXml(doc);
- expect(serialized).toContain('');
- });
-
- it
- .openspec('delete root comment cascade-deletes all descendants')
- ('Scenario: delete root comment cascade-deletes all descendants', async () => {
- const zip = await loadZip(await makeDocxBuffer('Hello world'));
- await bootstrapCommentParts(zip);
-
- const doc = parseXml(await zip.readText('word/document.xml'));
- const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const root = await addComment(doc, zip, {
- paragraphEl: paragraph,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root comment',
- });
- const child = await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Child',
- text: 'First reply',
- });
- await addCommentReply(doc, zip, {
- parentCommentId: child.commentId,
- author: 'Grandchild',
- text: 'Second reply',
- });
-
- await deleteComment(doc, zip, { commentId: root.commentId });
-
- const comments = await getComments(zip, doc);
- expect(comments).toEqual([]);
-
- const commentsXml = await zip.readText('word/comments.xml');
- expect(commentsXml).not.toContain('Root comment');
- expect(commentsXml).not.toContain('First reply');
- expect(commentsXml).not.toContain('Second reply');
- });
-
- it
- .openspec('delete a leaf reply comment')
- ('Scenario: delete a leaf reply comment', async () => {
- const zip = await loadZip(await makeDocxBuffer('Hello world'));
- await bootstrapCommentParts(zip);
-
- const doc = parseXml(await zip.readText('word/document.xml'));
- const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const root = await addComment(doc, zip, {
- paragraphEl: paragraph,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root comment',
- });
- const leaf = await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Leaf',
- text: 'Leaf reply',
- });
-
- await deleteComment(doc, zip, { commentId: leaf.commentId });
-
- const comments = await getComments(zip, doc);
- expect(comments).toHaveLength(1);
- expect(comments[0]!.text).toBe('Root comment');
- expect(comments[0]!.replies).toEqual([]);
- });
-
- it
- .openspec('delete a non-leaf reply cascades to its descendants')
- ('Scenario: delete a non-leaf reply cascades to its descendants', async () => {
- const zip = await loadZip(await makeDocxBuffer('Hello world'));
- await bootstrapCommentParts(zip);
-
- const doc = parseXml(await zip.readText('word/document.xml'));
- const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
-
- const root = await addComment(doc, zip, {
- paragraphEl: paragraph,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root comment',
- });
- const nonLeafReply = await addCommentReply(doc, zip, {
- parentCommentId: root.commentId,
- author: 'Reply',
- text: 'Reply level 1',
- });
- await addCommentReply(doc, zip, {
- parentCommentId: nonLeafReply.commentId,
- author: 'Reply',
- text: 'Reply level 2',
- });
-
- await deleteComment(doc, zip, { commentId: nonLeafReply.commentId });
-
- const comments = await getComments(zip, doc);
- expect(comments).toHaveLength(1);
- expect(comments[0]!.text).toBe('Root comment');
- expect(comments[0]!.replies).toEqual([]);
- });
-
- it
- .openspec('comment not found returns error')
- ('Scenario: comment not found returns error', async () => {
- const zip = await loadZip(await makeDocxBuffer('Hello world'));
- await bootstrapCommentParts(zip);
-
- const doc = parseXml(await zip.readText('word/document.xml'));
- const paragraph = doc.getElementsByTagNameNS(W_NS, W.p).item(0) as Element;
- await addComment(doc, zip, {
- paragraphEl: paragraph,
- start: 0,
- end: 5,
- author: 'Author',
- text: 'Root comment',
- });
-
- await expect(deleteComment(doc, zip, { commentId: 999 })).rejects.toThrow(/not found/i);
- });
-});
diff --git a/packages/docx-core/test-primitives/helpers/allure-test.d.ts b/packages/docx-core/test-primitives/helpers/allure-test.d.ts
deleted file mode 100644
index 29cefc6..0000000
--- a/packages/docx-core/test-primitives/helpers/allure-test.d.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import { type AllureBddContext as SharedAllureBddContext, type AllureRuntime as SharedAllureRuntime, type AllureStepContext as SharedAllureStepContext } from '../../../../testing/allure-test-factory.js';
-export type AllureRuntime = SharedAllureRuntime;
-export type AllureStepContext = SharedAllureStepContext;
-export type AllureBddContext = SharedAllureBddContext;
-export declare const itAllure: import("@usejunior/allure-test-factory").WrappedAllureTestFn<"DOCX Primitives">, testAllure: import("@usejunior/allure-test-factory").WrappedAllureTestFn<"DOCX Primitives">, allureStep: (name: string, run: () => T | Promise) => Promise, allureParameter: (name: string, value: string) => Promise, allureAttachment: (name: string, content: string | Uint8Array, contentType?: string) => Promise, allureJsonAttachment: (name: string, payload: unknown) => Promise, getAllureRuntime: () => SharedAllureRuntime | undefined;
-//# sourceMappingURL=allure-test.d.ts.map
\ No newline at end of file
diff --git a/packages/docx-core/test-primitives/helpers/allure-test.d.ts.map b/packages/docx-core/test-primitives/helpers/allure-test.d.ts.map
deleted file mode 100644
index 98e6037..0000000
--- a/packages/docx-core/test-primitives/helpers/allure-test.d.ts.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"allure-test.d.ts","sourceRoot":"","sources":["allure-test.ts"],"names":[],"mappings":"AAAA,OAAO,EAEL,KAAK,gBAAgB,IAAI,sBAAsB,EAC/C,KAAK,aAAa,IAAI,mBAAmB,EACzC,KAAK,iBAAiB,IAAI,uBAAuB,EAClD,MAAM,4CAA4C,CAAC;AAKpD,MAAM,MAAM,aAAa,GAAG,mBAAmB,CAAC;AAChD,MAAM,MAAM,iBAAiB,GAAG,uBAAuB,CAAC;AACxD,MAAM,MAAM,gBAAgB,GAAG,sBAAsB,CAAC;AAMtD,eAAO,MACL,QAAQ,mFACR,UAAU,mFACV,UAAU,8DACV,eAAe,kDACf,gBAAgB,uFAChB,oBAAoB,qDACpB,gBAAgB,uCACP,CAAC"}
\ No newline at end of file
diff --git a/packages/docx-core/test-primitives/helpers/allure-test.js b/packages/docx-core/test-primitives/helpers/allure-test.js
deleted file mode 100644
index 0986d3c..0000000
--- a/packages/docx-core/test-primitives/helpers/allure-test.js
+++ /dev/null
@@ -1,6 +0,0 @@
-import { createAllureTestHelpers, } from '../../../../testing/allure-test-factory.js';
-const helpers = createAllureTestHelpers({
- defaultEpic: 'DOCX Primitives',
-});
-export const { itAllure, testAllure, allureStep, allureParameter, allureAttachment, allureJsonAttachment, getAllureRuntime, } = helpers;
-//# sourceMappingURL=allure-test.js.map
\ No newline at end of file
diff --git a/packages/docx-core/test-primitives/helpers/allure-test.js.map b/packages/docx-core/test-primitives/helpers/allure-test.js.map
deleted file mode 100644
index b75a857..0000000
--- a/packages/docx-core/test-primitives/helpers/allure-test.js.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"allure-test.js","sourceRoot":"","sources":["allure-test.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,uBAAuB,GAIxB,MAAM,4CAA4C,CAAC;AASpD,MAAM,OAAO,GAAG,uBAAuB,CAAW;IAChD,WAAW,EAAE,iBAAiB;CAC/B,CAAC,CAAC;AAEH,MAAM,CAAC,MAAM,EACX,QAAQ,EACR,UAAU,EACV,UAAU,EACV,eAAe,EACf,gBAAgB,EAChB,oBAAoB,EACpB,gBAAgB,GACjB,GAAG,OAAO,CAAC"}
\ No newline at end of file
diff --git a/packages/docx-core/test-primitives/helpers/xml-test-utils.ts b/packages/docx-core/test-primitives/helpers/xml-test-utils.ts
deleted file mode 100644
index a879d02..0000000
--- a/packages/docx-core/test-primitives/helpers/xml-test-utils.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-import { parseXml } from '../../src/xml.js';
-
-const W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main';
-
-/**
- * Wraps body-level OOXML in a minimal w:document/w:body envelope.
- */
-export function wrapInBody(bodyXml: string): string {
- return (
- `` +
- `` +
- `${bodyXml}` +
- ``
- );
-}
-
-/**
- * Creates a minimal w:document DOM with the given body-level XML.
- */
-export function makeDoc(bodyXml: string): Document {
- return parseXml(wrapInBody(bodyXml));
-}
-
-/**
- * Creates a w:p element containing a single w:r/w:t with the given text.
- */
-export function simpleParagraph(text: string): string {
- return `${text}`;
-}
diff --git a/packages/docx-core/test-primitives/normalization.test.ts b/packages/docx-core/test-primitives/normalization.test.ts
deleted file mode 100644
index 95a1255..0000000
--- a/packages/docx-core/test-primitives/normalization.test.ts
+++ /dev/null
@@ -1,259 +0,0 @@
-import { describe, expect } from 'vitest';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { mergeRuns } from '../src/primitives/merge_runs.js';
-import { simplifyRedlines } from '../src/primitives/simplify_redlines.js';
-import { itAllure, allureStep, allureJsonAttachment } from './helpers/allure-test.js';
-
-const TEST_FEATURE = 'add-auto-normalization-on-open';
-const W_NS = OOXML.W_NS;
-
-const it = itAllure.epic('DOCX Primitives').withLabels({ feature: TEST_FEATURE });
-
-const humanReadableIt = it.allure({
-
- tags: ['human-readable'],
-
- parameters: { audience: 'non-technical' },
-
-});
-
-function makeDoc(bodyXml: string): Document {
- const xml =
- `` +
- `` +
- `${bodyXml}` +
- ``;
- return parseXml(xml);
-}
-
-function countRuns(doc: Document): number {
- return doc.getElementsByTagNameNS(W_NS, W.r).length;
-}
-
-function bodyText(doc: Document): string {
- const texts: string[] = [];
- const ts = doc.getElementsByTagNameNS(W_NS, W.t);
- for (let i = 0; i < ts.length; i++) {
- texts.push(ts.item(i)!.textContent ?? '');
- }
- return texts.join('');
-}
-
-function countWrappers(doc: Document, localName: string): number {
- return doc.getElementsByTagNameNS(W_NS, localName).length;
-}
-
-describe('Traceability: Auto-Normalization on Open — Run Merging', () => {
- humanReadableIt.openspec('merge adjacent runs with equivalent formatting')('Scenario: merge adjacent runs with equivalent formatting', async () => {
- const doc = makeDoc(
- '' +
- 'Hello ' +
- 'World' +
- '',
- );
-
- const result = await allureStep('When merge_runs is called on adjacent format-identical runs', async () => {
- const r = mergeRuns(doc);
- await allureJsonAttachment('merge_runs result', r);
- return r;
- });
-
- await allureStep('Then the adjacent runs SHALL be consolidated into a single run', () => {
- expect(result.runsMerged).toBeGreaterThanOrEqual(1);
- expect(countRuns(doc)).toBe(1);
- });
-
- await allureStep('And the merged run SHALL preserve the original visible text and formatting', () => {
- expect(bodyText(doc)).toBe('Hello World');
- const rPr = doc.getElementsByTagNameNS(W_NS, W.rPr).item(0);
- expect(rPr).toBeTruthy();
- expect(rPr!.getElementsByTagNameNS(W_NS, W.b).length).toBe(1);
- });
- });
-
- humanReadableIt.openspec('never merge across field boundaries')('Scenario: never merge across field boundaries', async () => {
- const doc = makeDoc(
- '' +
- 'Before' +
- '' +
- ' MERGEFIELD Name ' +
- '' +
- 'Value' +
- '' +
- 'After' +
- '',
- );
-
- const result = await allureStep('When merge_runs is called on runs separated by fldChar elements', async () => {
- const r = mergeRuns(doc);
- await allureJsonAttachment('merge_runs result', r);
- return r;
- });
-
- await allureStep('Then the runs SHALL NOT be merged across the field boundary', () => {
- expect(result.runsMerged).toBe(0);
- });
-
- await allureStep('And field structure SHALL remain intact', () => {
- expect(countRuns(doc)).toBe(7);
- expect(bodyText(doc)).toContain('Before');
- expect(bodyText(doc)).toContain('After');
- });
- });
-
- humanReadableIt.openspec('never merge across comment range boundaries')('Scenario: never merge across comment range boundaries', async () => {
- const doc = makeDoc(
- '' +
- 'Before' +
- '' +
- 'After' +
- '' +
- '',
- );
-
- const result = await allureStep('When merge_runs is called on runs separated by comment range markers', async () => {
- const r = mergeRuns(doc);
- await allureJsonAttachment('merge_runs result', r);
- return r;
- });
-
- await allureStep('Then the runs SHALL NOT be merged across comment range boundaries', () => {
- expect(result.runsMerged).toBe(0);
- expect(countRuns(doc)).toBe(2);
- });
- });
-
- humanReadableIt.openspec('never merge across bookmark boundaries')('Scenario: never merge across bookmark boundaries', async () => {
- const doc = makeDoc(
- '' +
- 'Before' +
- '' +
- 'After' +
- '' +
- '',
- );
-
- const result = await allureStep('When merge_runs is called on runs separated by bookmark markers', async () => {
- const r = mergeRuns(doc);
- await allureJsonAttachment('merge_runs result', r);
- return r;
- });
-
- await allureStep('Then the runs SHALL NOT be merged across bookmark boundaries', () => {
- expect(result.runsMerged).toBe(0);
- expect(countRuns(doc)).toBe(2);
- });
- });
-
- humanReadableIt.openspec('never merge across tracked-change wrapper boundaries')('Scenario: never merge across tracked-change wrapper boundaries', async () => {
- const doc = makeDoc(
- '' +
- '' +
- 'Inserted1' +
- '' +
- '' +
- 'Deleted1' +
- '' +
- '',
- );
-
- const result = await allureStep('When merge_runs is called on runs in different tracked-change wrappers', async () => {
- const r = mergeRuns(doc);
- await allureJsonAttachment('merge_runs result', r);
- return r;
- });
-
- await allureStep('Then runs in different tracked-change wrappers SHALL NOT be merged', () => {
- expect(result.runsMerged).toBe(0);
- });
- });
-});
-
-describe('Traceability: Auto-Normalization on Open — Redline Simplification', () => {
- humanReadableIt.openspec('merge adjacent same-author same-type tracked-change wrappers')('Scenario: merge adjacent same-author same-type tracked-change wrappers', async () => {
- const doc = makeDoc(
- '' +
- '' +
- 'Hello ' +
- '' +
- '' +
- 'World' +
- '' +
- '',
- );
-
- const result = await allureStep('When simplify_redlines is called on adjacent same-author w:ins wrappers', async () => {
- const r = simplifyRedlines(doc);
- await allureJsonAttachment('simplify_redlines result', r);
- return r;
- });
-
- await allureStep('Then the adjacent wrappers SHALL be consolidated into a single wrapper', () => {
- expect(result.wrappersConsolidated).toBeGreaterThanOrEqual(1);
- expect(countWrappers(doc, 'ins')).toBe(1);
- });
-
- await allureStep('And the merged wrapper SHALL preserve all child content', () => {
- expect(bodyText(doc)).toBe('Hello World');
- });
- });
-
- humanReadableIt.openspec('never merge wrappers from different authors')('Scenario: never merge wrappers from different authors', async () => {
- const doc = makeDoc(
- '' +
- '' +
- 'Alice text' +
- '' +
- '' +
- 'Bob text' +
- '' +
- '',
- );
-
- const result = await allureStep('When simplify_redlines is called on adjacent different-author wrappers', async () => {
- const r = simplifyRedlines(doc);
- await allureJsonAttachment('simplify_redlines result', r);
- return r;
- });
-
- await allureStep('Then the wrappers SHALL NOT be merged', () => {
- expect(result.wrappersConsolidated).toBe(0);
- expect(countWrappers(doc, 'ins')).toBe(2);
- });
-
- await allureStep('And author attribution SHALL be preserved', () => {
- const wrappers = doc.getElementsByTagNameNS(W_NS, 'ins');
- const authors = new Set();
- for (let i = 0; i < wrappers.length; i++) {
- authors.add(wrappers.item(i)!.getAttribute('w:author') ?? '');
- }
- expect(authors).toEqual(new Set(['Alice', 'Bob']));
- });
- });
-
- humanReadableIt.openspec('never merge across different change types')('Scenario: never merge across different change types', async () => {
- const doc = makeDoc(
- '' +
- '' +
- 'New' +
- '' +
- '' +
- 'Old' +
- '' +
- '',
- );
-
- const result = await allureStep('When simplify_redlines is called on adjacent w:ins + w:del from same author', async () => {
- const r = simplifyRedlines(doc);
- await allureJsonAttachment('simplify_redlines result', r);
- return r;
- });
-
- await allureStep('Then the wrappers SHALL NOT be merged', () => {
- expect(result.wrappersConsolidated).toBe(0);
- expect(countWrappers(doc, 'ins')).toBe(1);
- expect(countWrappers(doc, 'del')).toBe(1);
- });
- });
-});
diff --git a/packages/docx-core/test-primitives/setup-allure-labels.ts b/packages/docx-core/test-primitives/setup-allure-labels.ts
deleted file mode 100644
index 22d9807..0000000
--- a/packages/docx-core/test-primitives/setup-allure-labels.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Global Allure label setup for docx-primitives-ts.
- *
- * Sets package-level parentSuite and epic so the Allure report groups tests
- * under "DOCX Primitives" instead of the flat "test" directory name.
- *
- * Tests using `itAllure`/`testAllure` override these defaults in their test
- * body — the wrapWithAllure() call runs AFTER beforeEach, so its values win.
- */
-import { beforeEach, expect } from 'vitest';
-
-declare const allure: { epic: (name: string) => void | Promise; feature: (name: string) => void | Promise; parentSuite: (name: string) => void | Promise; suite: (name: string) => void | Promise; subSuite: (name: string) => void | Promise; severity: (level: string) => void | Promise; story: (name: string) => void | Promise; id: (id: string) => void | Promise; allureId: (id: string) => void | Promise; displayName: (value: string) => void | Promise; label: (name: string, value: string) => void | Promise; description: (value: string) => void | Promise; tags: (...values: string[]) => void | Promise; tag: (value: string) => void | Promise; test: (value: unknown) => void | Promise; step: (name: string, body: (...args: unknown[]) => T | Promise) => Promise; parameter: (name: string, value: string) => void | Promise; attachment: (name: string, content: string | Uint8Array, contentType?: string) => void | Promise; };
-
-const PACKAGE_NAME = 'DOCX Primitives';
-
-beforeEach(async () => {
- if (typeof allure === 'undefined') return;
-
- const state = expect.getState() as { currentTestName?: string };
- const parts = (state.currentTestName ?? '')
- .split(' > ')
- .map((s) => s.trim())
- .filter(Boolean);
-
- // Top-level grouping in Suites view
- await allure.parentSuite(PACKAGE_NAME);
-
- // First describe block → suite
- const suiteName = parts[0];
- if (parts.length > 1 && suiteName) {
- await allure.suite(suiteName);
- }
-
- // Second describe block → subSuite
- const subSuiteName = parts[1];
- if (parts.length > 2 && subSuiteName) {
- await allure.subSuite(subSuiteName);
- }
-
- // Behaviors view: epic + feature
- await allure.epic(PACKAGE_NAME);
- const featureName = parts[0];
- if (featureName) {
- await allure.feature(featureName);
- }
-});
diff --git a/packages/docx-core/test-primitives/text.test.ts b/packages/docx-core/test-primitives/text.test.ts
deleted file mode 100644
index 064cf0b..0000000
--- a/packages/docx-core/test-primitives/text.test.ts
+++ /dev/null
@@ -1,102 +0,0 @@
-import { describe, expect } from 'vitest';
-import { itAllure as it } from './helpers/allure-test.js';
-import { parseXml } from '../src/primitives/xml.js';
-import { OOXML, W } from '../src/primitives/namespaces.js';
-import { SafeDocxError } from '../src/primitives/errors.js';
-import { getParagraphRuns, getParagraphText, replaceParagraphTextRange } from '../src/primitives/text.js';
-
-function makeDoc(bodyXml: string): Document {
- const xml =
- `` +
- `` +
- `${bodyXml}` +
- ``;
- return parseXml(xml);
-}
-
-function firstParagraph(doc: Document): Element {
- const p = doc.getElementsByTagNameNS(OOXML.W_NS, W.p).item(0);
- if (!p) throw new Error('missing paragraph');
- return p;
-}
-
-describe('text primitives', () => {
- it('extracts paragraph runs and tracks field-result visibility', () => {
- const doc = makeDoc(
- `` +
- `` +
- `REF Clause_1` +
- `Visible` +
- ` Result` +
- `` +
- ` tail` +
- ``
- );
-
- const runs = getParagraphRuns(firstParagraph(doc));
- expect(runs.map((r) => r.text)).toEqual(['Visible', ' Result', ' tail\t\n']);
- expect(runs.map((r) => r.isFieldResult)).toEqual([true, true, false]);
- expect(getParagraphText(firstParagraph(doc))).toBe('Visible Result tail\t\n');
- });
-
- it('replaces a cross-run range and applies additive run props', () => {
- const doc = makeDoc(
- `` +
- `Hello` +
- ` world` +
- ``
- );
- const p = firstParagraph(doc);
-
- replaceParagraphTextRange(p, 3, 8, [
- {
- text: 'X\tY',
- addRunProps: { underline: true, highlight: true },
- },
- ]);
-
- expect(getParagraphText(p)).toBe('HelX\tYrld');
- const xml = p.toString();
- expect(xml).toContain('');
- expect(xml).toContain('');
- });
-
- it('throws UNSUPPORTED_EDIT when a multi-run edit intersects field results', () => {
- const doc = makeDoc(
- `` +
- `` +
- `REF X` +
- `Visible` +
- ` Result` +
- `` +
- ``
- );
- const p = firstParagraph(doc);
-
- expect(() => replaceParagraphTextRange(p, 0, 13, 'Updated')).toThrowError(SafeDocxError);
- try {
- replaceParagraphTextRange(p, 0, 13, 'Updated');
- } catch (e: unknown) {
- if (!(e instanceof SafeDocxError)) throw e;
- expect(e.code).toBe('UNSUPPORTED_EDIT');
- }
- });
-
- it('throws UNSAFE_CONTAINER_BOUNDARY when replacement spans different run containers', () => {
- const doc = makeDoc(
- `` +
- `Link` +
- `Tail` +
- ``
- );
- const p = firstParagraph(doc);
-
- expect(() => replaceParagraphTextRange(p, 2, 6, 'Changed')).toThrowError(SafeDocxError);
- try {
- replaceParagraphTextRange(p, 2, 6, 'Changed');
- } catch (e: unknown) {
- if (!(e instanceof SafeDocxError)) throw e;
- expect(e.code).toBe('UNSAFE_CONTAINER_BOUNDARY');
- }
- });
-});
diff --git a/packages/docx-core/vitest.baseline.config.ts b/packages/docx-core/vitest.baseline.config.ts
index f380bce..44d219e 100644
--- a/packages/docx-core/vitest.baseline.config.ts
+++ b/packages/docx-core/vitest.baseline.config.ts
@@ -54,7 +54,7 @@ export default defineConfig({
resultsDir: allureResultsDir,
cleanResultsDir: true,
packageName: 'DOCX Comparison',
- packageNameOverrides: { 'test-primitives': 'DOCX Primitives' },
+ packageNameOverrides: { 'src/primitives': 'DOCX Primitives' },
},
],
]
diff --git a/packages/docx-core/vitest.config.ts b/packages/docx-core/vitest.config.ts
index b3a4bbf..effe595 100644
--- a/packages/docx-core/vitest.config.ts
+++ b/packages/docx-core/vitest.config.ts
@@ -42,7 +42,7 @@ export default defineConfig({
test: {
globals: true,
environment: 'node',
- include: ['src/**/*.test.ts', 'test-primitives/**/*.test.ts'],
+ include: ['src/**/*.test.ts'],
exclude: ['src/baselines/**/*.test.ts'],
coverage: {
provider: 'v8',
@@ -72,7 +72,7 @@ export default defineConfig({
resultsDir: allureResultsDir,
cleanResultsDir: true,
packageName: 'DOCX Comparison',
- packageNameOverrides: { 'test-primitives': 'DOCX Primitives' },
+ packageNameOverrides: { 'src/primitives': 'DOCX Primitives' },
},
],
]
diff --git a/packages/docx-mcp/src/add_multi_platform_mcp_discovery.test.ts b/packages/docx-mcp/src/add_multi_platform_mcp_discovery.test.ts
index 1adc7d0..62949f5 100644
--- a/packages/docx-mcp/src/add_multi_platform_mcp_discovery.test.ts
+++ b/packages/docx-mcp/src/add_multi_platform_mcp_discovery.test.ts
@@ -2,7 +2,7 @@ import { describe, expect } from 'vitest';
import fs from 'node:fs';
import path from 'node:path';
-import { testAllure } from './testing/allure-test.js';
+import { testAllure, allureStep } from './testing/allure-test.js';
const REPO_ROOT = path.resolve(import.meta.dirname, '..', '..', '..');
@@ -18,63 +18,91 @@ describe('Multi-platform MCP discovery docs', () => {
humanReadableTest.openspec('Gemini CLI discovers SafeDocX via extension manifest')(
'Scenario: Gemini CLI discovers SafeDocX via extension manifest',
async () => {
- const manifestPath = path.join(REPO_ROOT, 'gemini-extension.json');
- expect(fs.existsSync(manifestPath)).toBe(true);
-
- const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
- expect(manifest.mcpServers).toBeDefined();
- expect(manifest.mcpServers['safe-docx']).toBeDefined();
- expect(manifest.mcpServers['safe-docx'].command).toBe('npx');
- expect(manifest.mcpServers['safe-docx'].args).toContain('@usejunior/safe-docx');
+ const manifestPath = await allureStep('Given the gemini-extension.json manifest exists', async () => {
+ const p = path.join(REPO_ROOT, 'gemini-extension.json');
+ expect(fs.existsSync(p)).toBe(true);
+ return p;
+ });
+
+ const manifest = await allureStep('When the manifest is parsed', async () => {
+ return JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
+ });
+
+ await allureStep('Then it declares a safe-docx MCP server via npx', () => {
+ expect(manifest.mcpServers).toBeDefined();
+ expect(manifest.mcpServers['safe-docx']).toBeDefined();
+ expect(manifest.mcpServers['safe-docx'].command).toBe('npx');
+ expect(manifest.mcpServers['safe-docx'].args).toContain('@usejunior/safe-docx');
+ });
},
);
humanReadableTest.openspec('Extension manifest is valid JSON with required fields')(
'Scenario: Extension manifest is valid JSON with required fields',
async () => {
- const manifestPath = path.join(REPO_ROOT, 'gemini-extension.json');
- const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
+ const manifest = await allureStep('Given the parsed gemini-extension.json', async () => {
+ const manifestPath = path.join(REPO_ROOT, 'gemini-extension.json');
+ return JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
+ });
- expect(manifest.name).toBe('safe-docx');
- expect(manifest.version).toBeDefined();
- expect(manifest.description).toBeDefined();
- expect(manifest.mcpServers).toBeDefined();
+ await allureStep('When checking top-level required fields', () => {
+ expect(manifest.name).toBe('safe-docx');
+ expect(manifest.version).toBeDefined();
+ expect(manifest.description).toBeDefined();
+ expect(manifest.mcpServers).toBeDefined();
+ });
- const server = manifest.mcpServers['safe-docx'];
- expect(server.command).toBeDefined();
- expect(server.args).toBeDefined();
+ await allureStep('Then the safe-docx server entry has command and args', () => {
+ const server = manifest.mcpServers['safe-docx'];
+ expect(server.command).toBeDefined();
+ expect(server.args).toBeDefined();
+ });
},
);
humanReadableTest.openspec('AI agent configures SafeDocX from install guide')(
'Scenario: AI agent configures SafeDocX from install guide',
async () => {
- const installGuidePath = path.join(REPO_ROOT, 'packages', 'docx-mcp', 'llms-install.md');
- expect(fs.existsSync(installGuidePath)).toBe(true);
-
- const content = fs.readFileSync(installGuidePath, 'utf-8');
- expect(content).toContain('npx');
- expect(content).toContain('@usejunior/safe-docx');
- expect(content).toContain('Claude Desktop');
- expect(content).toContain('Claude Code');
- expect(content).toContain('Gemini CLI');
- expect(content).toContain('Cline');
- expect(content).toContain('Generic MCP Client');
+ const content = await allureStep('Given the llms-install.md guide exists', async () => {
+ const installGuidePath = path.join(REPO_ROOT, 'packages', 'docx-mcp', 'llms-install.md');
+ expect(fs.existsSync(installGuidePath)).toBe(true);
+ return fs.readFileSync(installGuidePath, 'utf-8');
+ });
+
+ await allureStep('When checking for npx install command', () => {
+ expect(content).toContain('npx');
+ expect(content).toContain('@usejunior/safe-docx');
+ });
+
+ await allureStep('Then all supported platforms are documented', () => {
+ expect(content).toContain('Claude Desktop');
+ expect(content).toContain('Claude Code');
+ expect(content).toContain('Gemini CLI');
+ expect(content).toContain('Cline');
+ expect(content).toContain('Generic MCP Client');
+ });
},
);
humanReadableTest.openspec('Gemini model reads context file for tool guidance')(
'Scenario: Gemini model reads context file for tool guidance',
async () => {
- const geminiMdPath = path.join(REPO_ROOT, 'GEMINI.md');
- expect(fs.existsSync(geminiMdPath)).toBe(true);
-
- const content = fs.readFileSync(geminiMdPath, 'utf-8');
- expect(content).toContain('read_file');
- expect(content).toContain('replace_text');
- expect(content).toContain('save');
- expect(content).toContain('local');
- expect(content).toContain('Trust Boundary');
+ const content = await allureStep('Given the GEMINI.md context file exists', async () => {
+ const geminiMdPath = path.join(REPO_ROOT, 'GEMINI.md');
+ expect(fs.existsSync(geminiMdPath)).toBe(true);
+ return fs.readFileSync(geminiMdPath, 'utf-8');
+ });
+
+ await allureStep('When checking for tool usage guidance', () => {
+ expect(content).toContain('read_file');
+ expect(content).toContain('replace_text');
+ expect(content).toContain('save');
+ });
+
+ await allureStep('Then trust boundary and local-only scope are documented', () => {
+ expect(content).toContain('local');
+ expect(content).toContain('Trust Boundary');
+ });
},
);
});
diff --git a/packages/docx-mcp/src/add_safe_docx_ts_formatting_parity.test.ts b/packages/docx-mcp/src/add_safe_docx_ts_formatting_parity.test.ts
index cf63ea1..69cacbd 100644
--- a/packages/docx-mcp/src/add_safe_docx_ts_formatting_parity.test.ts
+++ b/packages/docx-mcp/src/add_safe_docx_ts_formatting_parity.test.ts
@@ -11,7 +11,7 @@ import {
makeDocxWithDocumentXml,
makeMinimalDocx,
} from './testing/docx_test_utils.js';
-import { testAllure } from './testing/allure-test.js';
+import { testAllure, allureStep } from './testing/allure-test.js';
import {
openSession,
assertSuccess,
@@ -35,100 +35,132 @@ describe('Traceability: TypeScript Formatting Parity', () => {
registerCleanup();
humanReadableTest.openspec('read_file returns TOON schema with structure columns')('Scenario: read_file returns TOON schema with structure columns', async () => {
- const { content } = await openSession(['Body paragraph']);
- expect(content).toContain('#SCHEMA id | list_label | header | style | text');
- expect(content).toContain('Body paragraph');
+ const { content } = await allureStep('Given a session with one body paragraph', async () => {
+ return openSession(['Body paragraph']);
+ });
+
+ await allureStep('Then the TOON output contains schema header and paragraph text', async () => {
+ expect(content).toContain('#SCHEMA id | list_label | header | style | text');
+ expect(content).toContain('Body paragraph');
+ });
});
humanReadableTest.openspec('read_file JSON mode returns node metadata')('Scenario: read_file JSON mode returns node metadata', async () => {
- const { mgr, sessionId } = await openSession(['Alpha']);
-
- const read = await readFile(mgr, { session_id: sessionId, format: 'json' });
- assertSuccess(read, 'read');
- const nodes = JSON.parse(String(read.content)) as Array>;
- expect(nodes.length).toBeGreaterThan(0);
- const node = nodes[0]!;
- expect(node).toHaveProperty('id');
- expect(node).toHaveProperty('list_label');
- expect(node).toHaveProperty('header');
- expect(node).toHaveProperty('style');
- expect(node).toHaveProperty('text');
- expect(node).toHaveProperty('style_fingerprint');
- expect(node).toHaveProperty('header_formatting');
- expect(node).toHaveProperty('numbering');
+ const { mgr, sessionId } = await allureStep('Given a session with one paragraph', async () => {
+ return openSession(['Alpha']);
+ });
+
+ const nodes = await allureStep('When read_file is called in JSON format', async () => {
+ const read = await readFile(mgr, { session_id: sessionId, format: 'json' });
+ assertSuccess(read, 'read');
+ return JSON.parse(String(read.content)) as Array>;
+ });
+
+ await allureStep('Then node metadata includes all expected properties', async () => {
+ expect(nodes.length).toBeGreaterThan(0);
+ const node = nodes[0]!;
+ expect(node).toHaveProperty('id');
+ expect(node).toHaveProperty('list_label');
+ expect(node).toHaveProperty('header');
+ expect(node).toHaveProperty('style');
+ expect(node).toHaveProperty('text');
+ expect(node).toHaveProperty('style_fingerprint');
+ expect(node).toHaveProperty('header_formatting');
+ expect(node).toHaveProperty('numbering');
+ });
});
humanReadableTest.openspec('fingerprint ignores volatile attributes')('Scenario: fingerprint ignores volatile attributes', async () => {
- const base =
- `` +
- `` +
- `` +
- `Clause text` +
- ``;
-
- const xmlA = base.replace('RSID_MARKER', 'w:rsidR="00112233" w:rsidRDefault="00112233"');
- const xmlB = base.replace('RSID_MARKER', 'w:rsidR="AABBCCDD" w:rsidRDefault="AABBCCDD"');
-
- const mgr = createTestSessionManager();
- const tmpDir = await createTrackedTempDir('safe-docx-fingerprint-');
- const pathA = path.join(tmpDir, 'a.docx');
- const pathB = path.join(tmpDir, 'b.docx');
- await fs.writeFile(pathA, new Uint8Array(await makeDocxWithDocumentXml(xmlA)));
- await fs.writeFile(pathB, new Uint8Array(await makeDocxWithDocumentXml(xmlB)));
-
- const openA = await openDocument(mgr, { file_path: pathA });
- const openB = await openDocument(mgr, { file_path: pathB });
- assertSuccess(openA, 'openA');
- assertSuccess(openB, 'openB');
-
- const readA = await readFile(mgr, { session_id: openA.session_id as string, format: 'json' });
- const readB = await readFile(mgr, { session_id: openB.session_id as string, format: 'json' });
- assertSuccess(readA, 'readA');
- assertSuccess(readB, 'readB');
-
- const nodeA = (JSON.parse(String(readA.content)) as Array>)[0]!;
- const nodeB = (JSON.parse(String(readB.content)) as Array>)[0]!;
- expect(nodeA.style_fingerprint).toEqual(nodeB.style_fingerprint);
- expect(nodeA.style).toEqual(nodeB.style);
+ const { mgr, openA, openB } = await allureStep('Given two docs differing only in RSID attributes', async () => {
+ const base =
+ `` +
+ `` +
+ `` +
+ `Clause text` +
+ ``;
+
+ const xmlA = base.replace('RSID_MARKER', 'w:rsidR="00112233" w:rsidRDefault="00112233"');
+ const xmlB = base.replace('RSID_MARKER', 'w:rsidR="AABBCCDD" w:rsidRDefault="AABBCCDD"');
+
+ const mgr = createTestSessionManager();
+ const tmpDir = await createTrackedTempDir('safe-docx-fingerprint-');
+ const pathA = path.join(tmpDir, 'a.docx');
+ const pathB = path.join(tmpDir, 'b.docx');
+ await fs.writeFile(pathA, new Uint8Array(await makeDocxWithDocumentXml(xmlA)));
+ await fs.writeFile(pathB, new Uint8Array(await makeDocxWithDocumentXml(xmlB)));
+
+ const openA = await openDocument(mgr, { file_path: pathA });
+ const openB = await openDocument(mgr, { file_path: pathB });
+ assertSuccess(openA, 'openA');
+ assertSuccess(openB, 'openB');
+ return { mgr, openA, openB };
+ });
+
+ const { nodeA, nodeB } = await allureStep('When both docs are read in JSON format', async () => {
+ const readA = await readFile(mgr, { session_id: openA.session_id as string, format: 'json' });
+ const readB = await readFile(mgr, { session_id: openB.session_id as string, format: 'json' });
+ assertSuccess(readA, 'readA');
+ assertSuccess(readB, 'readB');
+ const nodeA = (JSON.parse(String(readA.content)) as Array>)[0]!;
+ const nodeB = (JSON.parse(String(readB.content)) as Array>)[0]!;
+ return { nodeA, nodeB };
+ });
+
+ await allureStep('Then fingerprints and styles match despite different RSIDs', async () => {
+ expect(nodeA.style_fingerprint).toEqual(nodeB.style_fingerprint);
+ expect(nodeA.style).toEqual(nodeB.style);
+ });
});
humanReadableTest.openspec('stable style IDs within a session')('Scenario: stable style IDs within a session', async () => {
- const { mgr, sessionId } = await openSession(['One', 'Two']);
+ const { mgr, sessionId } = await allureStep('Given a session with two paragraphs', async () => {
+ return openSession(['One', 'Two']);
+ });
- const read1 = await readFile(mgr, { session_id: sessionId, format: 'json' });
- const read2 = await readFile(mgr, { session_id: sessionId, format: 'json' });
- assertSuccess(read1, 'read1');
- assertSuccess(read2, 'read2');
+ const { nodes1, nodes2 } = await allureStep('When read_file is called twice in JSON format', async () => {
+ const read1 = await readFile(mgr, { session_id: sessionId, format: 'json' });
+ const read2 = await readFile(mgr, { session_id: sessionId, format: 'json' });
+ assertSuccess(read1, 'read1');
+ assertSuccess(read2, 'read2');
+ const nodes1 = JSON.parse(String(read1.content)) as Array>;
+ const nodes2 = JSON.parse(String(read2.content)) as Array>;
+ return { nodes1, nodes2 };
+ });
- const nodes1 = JSON.parse(String(read1.content)) as Array>;
- const nodes2 = JSON.parse(String(read2.content)) as Array>;
- expect(nodes1.map((n) => n.style)).toEqual(nodes2.map((n) => n.style));
+ await allureStep('Then style IDs are identical across both reads', async () => {
+ expect(nodes1.map((n) => n.style)).toEqual(nodes2.map((n) => n.style));
+ });
});
humanReadableTest.openspec('formatting-based header detection')('Scenario: formatting-based header detection', async () => {
- const xml =
- `` +
- `` +
- `` +
- `` +
- `Security Incidents:` +
- ` Recipient must notify promptly.` +
- `` +
- ``;
-
- const { mgr, sessionId } = await openSession([], { xml });
-
- const read = await readFile(mgr, { session_id: sessionId });
- assertSuccess(read, 'read');
-
- const row = String(read.content)
- .split('\n')
- .find((line) => line.startsWith('_bk_'));
- expect(row).toBeTruthy();
- const cols = row!.split('|').map((c) => c.trim());
- expect(cols[2]).toBe('Security Incidents');
- expect(cols[4]).toContain('Recipient must notify promptly.');
- expect(cols[4]).not.toContain('Security Incidents:');
+ const { mgr, sessionId } = await allureStep('Given a doc with bold run-in header', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `` +
+ `Security Incidents:` +
+ ` Recipient must notify promptly.` +
+ `` +
+ ``;
+ return openSession([], { xml });
+ });
+
+ const row = await allureStep('When read_file is called in TOON format', async () => {
+ const read = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(read, 'read');
+ return String(read.content)
+ .split('\n')
+ .find((line) => line.startsWith('_bk_'));
+ });
+
+ await allureStep('Then header column contains the bold prefix and text column the rest', async () => {
+ expect(row).toBeTruthy();
+ const cols = row!.split('|').map((c) => c.trim());
+ expect(cols[2]).toBe('Security Incidents');
+ expect(cols[4]).toContain('Recipient must notify promptly.');
+ expect(cols[4]).not.toContain('Security Incidents:');
+ });
});
humanReadableTest.openspec('replace_text preserves mixed-run formatting')('Scenario: replace_text preserves mixed-run formatting', async () => {
@@ -138,266 +170,296 @@ describe('Traceability: TypeScript Formatting Parity', () => {
// To get mixed formatting in the output, the AI must use markup tags.
//
// Sub-scenario 1: plain replacement → single uniform run (predominant template)
- const xml =
- `` +
- `` +
- `` +
- `` +
- `ABC` +
- `DEF` +
- `GHI` +
- `` +
- ``;
-
- const { mgr, sessionId, tmpDir, firstParaId: paraId } = await openSession([], { xml, prefix: 'safe-docx-mixed-run-' });
- const outPath = path.join(tmpDir, 'out.docx');
-
- const edited = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'ABCDEFGHI',
- new_string: '123456789',
- instruction: 'full replacement uses template run',
- });
- assertSuccess(edited, 'edit');
-
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: outPath,
- clean_bookmarks: true,
- save_format: 'clean',
- });
- assertSuccess(saved, 'save');
-
- const { runs, runText, hasBold, hasItalic } = await parseOutputXml(outPath);
-
- // Full replacement with no shared prefix/suffix → single run with predominant template
- const fullRun = runs.find((r) => runText(r) === '123456789');
- expect(fullRun).toBeTruthy();
+ const { mgr, sessionId, tmpDir, paraId } = await allureStep('Given a paragraph with bold/normal/italic runs', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `` +
+ `ABC` +
+ `DEF` +
+ `GHI` +
+ `` +
+ ``;
+ const result = await openSession([], { xml, prefix: 'safe-docx-mixed-run-' });
+ return { mgr: result.mgr, sessionId: result.sessionId, tmpDir: result.tmpDir, paraId: result.firstParaId };
+ });
+
+ await allureStep('When the full text is replaced and saved', async () => {
+ const outPath = path.join(tmpDir, 'out.docx');
+ const edited = await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'ABCDEFGHI',
+ new_string: '123456789',
+ instruction: 'full replacement uses template run',
+ });
+ assertSuccess(edited, 'edit');
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: outPath,
+ clean_bookmarks: true,
+ save_format: 'clean',
+ });
+ assertSuccess(saved, 'save');
+ });
+
+ await allureStep('Then a single run with the replacement text is produced', async () => {
+ const outPath = path.join(tmpDir, 'out.docx');
+ const { runs, runText } = await parseOutputXml(outPath);
+ const fullRun = runs.find((r) => runText(r) === '123456789');
+ expect(fullRun).toBeTruthy();
+ });
});
humanReadableTest.openspec('insert_paragraph preserves header semantics')('Scenario: insert_paragraph preserves header semantics', async () => {
- const xml =
- `` +
- `` +
- `` +
- `` +
- `Security Incidents:` +
- ` Existing text.` +
- `` +
- `Anchor paragraph.` +
- ``;
-
- const { mgr, sessionId, tmpDir } = await openSession([], { xml, prefix: 'safe-docx-insert-semantics-' });
- const outPath = path.join(tmpDir, 'out.docx');
-
- const read = await readFile(mgr, { session_id: sessionId, format: 'json' });
- assertSuccess(read, 'read');
- const nodes = JSON.parse(String(read.content)) as Array<{ id: string; clean_text: string; header: string; text: string }>;
- const anchorId = nodes.find((n) => n.clean_text.includes('Anchor paragraph.'))?.id;
- expect(anchorId).toMatch(/^_bk_[0-9a-f]{12}$/);
-
- const inserted = await insertParagraph(mgr, {
- session_id: sessionId,
- positional_anchor_node_id: anchorId!,
- new_string: 'Security Incidents: New incident text.',
- instruction: 'semantic insert',
- position: 'AFTER',
- });
- assertSuccess(inserted, 'insert');
- const insertedId = inserted.new_paragraph_id as string;
-
- const read2 = await readFile(mgr, { session_id: sessionId, format: 'json' });
- assertSuccess(read2, 'read2');
- const nodes2 = JSON.parse(String(read2.content)) as Array<{ id: string; header: string; text: string }>;
- const insertedNode = nodes2.find((n) => n.id === insertedId);
- expect(insertedNode).toBeTruthy();
- expect(insertedNode!.header).toBe('Security Incidents');
-
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: outPath,
- clean_bookmarks: true,
- save_format: 'clean',
- });
- assertSuccess(saved, 'save');
-
- const { runs, runText, hasBold } = await parseOutputXml(outPath);
-
- const headerRun = runs.find((r) => runText(r) === 'Security Incidents:');
- expect(headerRun).toBeTruthy();
- expect(hasBold(headerRun!)).toBe(true);
+ const { mgr, sessionId, tmpDir, anchorId } = await allureStep('Given a doc with a bold header paragraph and an anchor', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `` +
+ `Security Incidents:` +
+ ` Existing text.` +
+ `` +
+ `Anchor paragraph.` +
+ ``;
+ const result = await openSession([], { xml, prefix: 'safe-docx-insert-semantics-' });
+ const read = await readFile(result.mgr, { session_id: result.sessionId, format: 'json' });
+ assertSuccess(read, 'read');
+ const nodes = JSON.parse(String(read.content)) as Array<{ id: string; clean_text: string; header: string; text: string }>;
+ const anchorId = nodes.find((n) => n.clean_text.includes('Anchor paragraph.'))?.id;
+ expect(anchorId).toMatch(/^_bk_[0-9a-f]{12}$/);
+ return { mgr: result.mgr, sessionId: result.sessionId, tmpDir: result.tmpDir, anchorId: anchorId! };
+ });
+
+ const insertedId = await allureStep('When a paragraph with RunInHeader markup is inserted', async () => {
+ const inserted = await insertParagraph(mgr, {
+ session_id: sessionId,
+ positional_anchor_node_id: anchorId,
+ new_string: 'Security Incidents: New incident text.',
+ instruction: 'semantic insert',
+ position: 'AFTER',
+ });
+ assertSuccess(inserted, 'insert');
+ return inserted.new_paragraph_id as string;
+ });
+
+ await allureStep('Then the inserted node has header metadata and bold formatting in output', async () => {
+ const read2 = await readFile(mgr, { session_id: sessionId, format: 'json' });
+ assertSuccess(read2, 'read2');
+ const nodes2 = JSON.parse(String(read2.content)) as Array<{ id: string; header: string; text: string }>;
+ const insertedNode = nodes2.find((n) => n.id === insertedId);
+ expect(insertedNode).toBeTruthy();
+ expect(insertedNode!.header).toBe('Security Incidents');
+
+ const outPath = path.join(tmpDir, 'out.docx');
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: outPath,
+ clean_bookmarks: true,
+ save_format: 'clean',
+ });
+ assertSuccess(saved, 'save');
+
+ const { runs, runText, hasBold } = await parseOutputXml(outPath);
+ const headerRun = runs.find((r) => runText(r) === 'Security Incidents:');
+ expect(headerRun).toBeTruthy();
+ expect(hasBold(headerRun!)).toBe(true);
+ });
});
humanReadableTest.openspec('header semantics accepted via tags for backward compatibility')('Scenario: header semantics accepted via tags for backward compatibility', async () => {
- const xml =
- `` +
- `` +
- `` +
- `` +
- `Security Incidents:` +
- ` Existing process.` +
- `` +
- `Placeholder A` +
- `Placeholder B` +
- ``;
-
- const { mgr, sessionId, tmpDir } = await openSession([], { xml, prefix: 'safe-docx-header-tags-' });
- const outPath = path.join(tmpDir, 'out.docx');
-
- const read = await readFile(mgr, { session_id: sessionId, format: 'json' });
- assertSuccess(read, 'read');
- const nodes = JSON.parse(String(read.content)) as Array<{ id: string; clean_text: string }>;
- const aId = nodes.find((n) => n.clean_text.includes('Placeholder A'))?.id;
- const bId = nodes.find((n) => n.clean_text.includes('Placeholder B'))?.id;
- expect(aId).toMatch(/^_bk_[0-9a-f]{12}$/);
- expect(bId).toMatch(/^_bk_[0-9a-f]{12}$/);
-
- const editA = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: aId!,
- old_string: 'Placeholder A',
- new_string: ' Recipient must notify promptly.',
- instruction: 'header compatibility tag',
- });
- assertSuccess(editA, 'editA');
-
- const editB = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: bId!,
- old_string: 'Placeholder B',
- new_string: 'Security Incidents: Recipient must escalate promptly.',
- instruction: 'run-in header compatibility tag',
- });
- assertSuccess(editB, 'editB');
-
- const read2 = await readFile(mgr, { session_id: sessionId, format: 'json' });
- assertSuccess(read2, 'read2');
- const nodes2 = JSON.parse(String(read2.content)) as Array<{ id: string; header: string; text: string }>;
- const nodeA = nodes2.find((n) => n.id === aId);
- const nodeB = nodes2.find((n) => n.id === bId);
- expect(nodeA).toBeTruthy();
- expect(nodeB).toBeTruthy();
- expect(nodeA!.header).toBe('Security Incidents');
- expect(nodeB!.header).toBe('Security Incidents');
-
- const readToon = await readFile(mgr, { session_id: sessionId });
- assertSuccess(readToon, 'read TOON');
- const rowA = String(readToon.content)
- .split('\n')
- .find((line) => line.startsWith(`${aId} |`));
- const rowB = String(readToon.content)
- .split('\n')
- .find((line) => line.startsWith(`${bId} |`));
- expect(rowA).toBeTruthy();
- expect(rowB).toBeTruthy();
- const colsA = rowA!.split('|').map((c) => c.trim());
- const colsB = rowB!.split('|').map((c) => c.trim());
- expect(colsA[2]).toBe('Security Incidents');
- expect(colsB[2]).toBe('Security Incidents');
- expect(colsA[4]).toContain('Recipient must notify promptly.');
- expect(colsB[4]).toContain('Recipient must escalate promptly.');
- expect(colsA[4]).not.toContain('Security Incidents:');
- expect(colsB[4]).not.toContain('Security Incidents:');
-
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: outPath,
- clean_bookmarks: true,
- save_format: 'clean',
- });
- assertSuccess(saved, 'save');
-
- const { runs, runText, hasBold } = await parseOutputXml(outPath);
-
- const headerRuns = runs.filter((r) => runText(r) === 'Security Incidents:');
- expect(headerRuns.length).toBeGreaterThanOrEqual(3);
- expect(headerRuns.every((r) => hasBold(r))).toBe(true);
+ const { mgr, sessionId, tmpDir, aId, bId } = await allureStep('Given a doc with header paragraph and two placeholders', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `` +
+ `Security Incidents:` +
+ ` Existing process.` +
+ `` +
+ `Placeholder A` +
+ `Placeholder B` +
+ ``;
+ const result = await openSession([], { xml, prefix: 'safe-docx-header-tags-' });
+ const read = await readFile(result.mgr, { session_id: result.sessionId, format: 'json' });
+ assertSuccess(read, 'read');
+ const nodes = JSON.parse(String(read.content)) as Array<{ id: string; clean_text: string }>;
+ const aId = nodes.find((n) => n.clean_text.includes('Placeholder A'))?.id;
+ const bId = nodes.find((n) => n.clean_text.includes('Placeholder B'))?.id;
+ expect(aId).toMatch(/^_bk_[0-9a-f]{12}$/);
+ expect(bId).toMatch(/^_bk_[0-9a-f]{12}$/);
+ return { mgr: result.mgr, sessionId: result.sessionId, tmpDir: result.tmpDir, aId: aId!, bId: bId! };
+ });
+
+ await allureStep('When placeholders are replaced using and tags', async () => {
+ const editA = await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: aId,
+ old_string: 'Placeholder A',
+ new_string: ' Recipient must notify promptly.',
+ instruction: 'header compatibility tag',
+ });
+ assertSuccess(editA, 'editA');
+ const editB = await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: bId,
+ old_string: 'Placeholder B',
+ new_string: 'Security Incidents: Recipient must escalate promptly.',
+ instruction: 'run-in header compatibility tag',
+ });
+ assertSuccess(editB, 'editB');
+ });
+
+ await allureStep('Then JSON and TOON reads show header metadata correctly', async () => {
+ const read2 = await readFile(mgr, { session_id: sessionId, format: 'json' });
+ assertSuccess(read2, 'read2');
+ const nodes2 = JSON.parse(String(read2.content)) as Array<{ id: string; header: string; text: string }>;
+ const nodeA = nodes2.find((n) => n.id === aId);
+ const nodeB = nodes2.find((n) => n.id === bId);
+ expect(nodeA).toBeTruthy();
+ expect(nodeB).toBeTruthy();
+ expect(nodeA!.header).toBe('Security Incidents');
+ expect(nodeB!.header).toBe('Security Incidents');
+
+ const readToon = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(readToon, 'read TOON');
+ const rowA = String(readToon.content)
+ .split('\n')
+ .find((line) => line.startsWith(`${aId} |`));
+ const rowB = String(readToon.content)
+ .split('\n')
+ .find((line) => line.startsWith(`${bId} |`));
+ expect(rowA).toBeTruthy();
+ expect(rowB).toBeTruthy();
+ const colsA = rowA!.split('|').map((c) => c.trim());
+ const colsB = rowB!.split('|').map((c) => c.trim());
+ expect(colsA[2]).toBe('Security Incidents');
+ expect(colsB[2]).toBe('Security Incidents');
+ expect(colsA[4]).toContain('Recipient must notify promptly.');
+ expect(colsB[4]).toContain('Recipient must escalate promptly.');
+ expect(colsA[4]).not.toContain('Security Incidents:');
+ expect(colsB[4]).not.toContain('Security Incidents:');
+ });
+
+ await allureStep('Then saved output has bold header runs', async () => {
+ const outPath = path.join(tmpDir, 'out.docx');
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: outPath,
+ clean_bookmarks: true,
+ save_format: 'clean',
+ });
+ assertSuccess(saved, 'save');
+ const { runs, runText, hasBold } = await parseOutputXml(outPath);
+ const headerRuns = runs.filter((r) => runText(r) === 'Security Incidents:');
+ expect(headerRuns.length).toBeGreaterThanOrEqual(3);
+ expect(headerRuns.every((r) => hasBold(r))).toBe(true);
+ });
});
humanReadableTest.openspec('field-aware visible text does not destroy fields')('Scenario: field-aware visible text does not destroy fields', async () => {
- const xml =
- `` +
- `` +
- `` +
- `` +
- `Amount: ` +
- `` +
- ` MERGEFIELD Amount ` +
- `` +
- `100` +
- `` +
- ` due.` +
- `` +
- ``;
-
- const { mgr, sessionId, firstParaId: paraId } = await openSession([], { xml, prefix: 'safe-docx-field-' });
-
- const edited = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'Amount: 100 due.',
- new_string: 'Amount: 250 due.',
- instruction: 'field-aware refusal',
- });
- assertFailure(edited, 'EDIT_ERROR', 'edit');
- expect(edited.error.message).toContain('unsupported');
+ const { mgr, sessionId, paraId } = await allureStep('Given a paragraph with a MERGEFIELD', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `` +
+ `Amount: ` +
+ `` +
+ ` MERGEFIELD Amount ` +
+ `` +
+ `100` +
+ `` +
+ ` due.` +
+ `` +
+ ``;
+ const result = await openSession([], { xml, prefix: 'safe-docx-field-' });
+ return { mgr: result.mgr, sessionId: result.sessionId, paraId: result.firstParaId };
+ });
+
+ const edited = await allureStep('When replace_text targets text spanning the field', async () => {
+ return replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'Amount: 100 due.',
+ new_string: 'Amount: 250 due.',
+ instruction: 'field-aware refusal',
+ });
+ });
+
+ await allureStep('Then an EDIT_ERROR with unsupported message is returned', async () => {
+ assertFailure(edited, 'EDIT_ERROR', 'edit');
+ expect(edited.error.message).toContain('unsupported');
+ });
});
humanReadableTest.openspec('pagination rules deterministic for zero offset')('Scenario: pagination rules deterministic for offset=0', async () => {
- const { mgr, sessionId } = await openSession(['A', 'B']);
+ const { mgr, sessionId } = await allureStep('Given a session with two paragraphs', async () => {
+ return openSession(['A', 'B']);
+ });
+
+ const read = await allureStep('When read_file is called with offset=0, limit=1', async () => {
+ return readFile(mgr, { session_id: sessionId, offset: 0, limit: 1, format: 'simple' });
+ });
- const read = await readFile(mgr, { session_id: sessionId, offset: 0, limit: 1, format: 'simple' });
- assertSuccess(read, 'read');
- expect(String(read.content)).toContain(' | A');
+ await allureStep('Then only the first paragraph is returned', async () => {
+ assertSuccess(read, 'read');
+ expect(String(read.content)).toContain(' | A');
+ });
});
humanReadableTest.openspec('post-edit invariants prevent empty paragraph stubs')('Scenario: post-edit invariants prevent empty paragraph stubs', async () => {
- const xml =
- `` +
- `` +
- `` +
- `[PLACEHOLDER]` +
- ``;
-
- const { mgr, sessionId, tmpDir, firstParaId: paraId } = await openSession([], { xml, prefix: 'safe-docx-posthook-' });
- const outPath = path.join(tmpDir, 'out.docx');
-
- const edited = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: '[PLACEHOLDER]',
- new_string: 'X',
- instruction: 'cleanup empty runs',
- });
- assertSuccess(edited, 'edit');
-
- const inserted = await insertParagraph(mgr, {
- session_id: sessionId,
- positional_anchor_node_id: paraId,
- new_string: 'Next',
- instruction: 'ensure paragraph integrity',
- position: 'AFTER',
- });
- assertSuccess(inserted, 'insert');
-
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: outPath,
- clean_bookmarks: true,
- save_format: 'clean',
- });
- assertSuccess(saved, 'save');
-
- const { runs } = await parseOutputXml(outPath);
- const W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main';
- for (const run of runs) {
- const nonRPrChildren = Array.from(run.childNodes).filter((child) => {
- if (child.nodeType !== 1) return false;
- const elementChild = child as Element;
- return !(elementChild.namespaceURI === W_NS && elementChild.localName === 'rPr');
+ const { mgr, sessionId, tmpDir, paraId } = await allureStep('Given a doc with a bold placeholder paragraph', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `[PLACEHOLDER]` +
+ ``;
+ const result = await openSession([], { xml, prefix: 'safe-docx-posthook-' });
+ return { mgr: result.mgr, sessionId: result.sessionId, tmpDir: result.tmpDir, paraId: result.firstParaId };
+ });
+
+ await allureStep('When the placeholder is replaced and a paragraph inserted', async () => {
+ const edited = await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: '[PLACEHOLDER]',
+ new_string: 'X',
+ instruction: 'cleanup empty runs',
});
- expect(nonRPrChildren.length).toBeGreaterThan(0);
- }
+ assertSuccess(edited, 'edit');
+ const inserted = await insertParagraph(mgr, {
+ session_id: sessionId,
+ positional_anchor_node_id: paraId,
+ new_string: 'Next',
+ instruction: 'ensure paragraph integrity',
+ position: 'AFTER',
+ });
+ assertSuccess(inserted, 'insert');
+ });
+
+ await allureStep('Then saved output has no empty run stubs', async () => {
+ const outPath = path.join(tmpDir, 'out.docx');
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: outPath,
+ clean_bookmarks: true,
+ save_format: 'clean',
+ });
+ assertSuccess(saved, 'save');
+ const { runs } = await parseOutputXml(outPath);
+ const W_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main';
+ for (const run of runs) {
+ const nonRPrChildren = Array.from(run.childNodes).filter((child) => {
+ if (child.nodeType !== 1) return false;
+ const elementChild = child as Element;
+ return !(elementChild.namespaceURI === W_NS && elementChild.localName === 'rPr');
+ });
+ expect(nonRPrChildren.length).toBeGreaterThan(0);
+ }
+ });
});
});
diff --git a/packages/docx-mcp/src/testing/add_attach_pretty_json_helper.test.ts b/packages/docx-mcp/src/testing/add_attach_pretty_json_helper.test.ts
index 6dd680b..7100385 100644
--- a/packages/docx-mcp/src/testing/add_attach_pretty_json_helper.test.ts
+++ b/packages/docx-mcp/src/testing/add_attach_pretty_json_helper.test.ts
@@ -1,6 +1,7 @@
import { afterEach, beforeEach, describe, expect } from 'vitest';
import {
itAllure,
+ allureStep,
type AllureBddContext,
type AllureRuntime,
type AllureStepContext,
@@ -68,61 +69,88 @@ describe('OpenSpec traceability: add-attach-pretty-json-helper', () => {
test
.openspec('attachPrettyJson renders formatted JSON inline')
('Scenario: attachPrettyJson renders formatted JSON inline', async ({ attachPrettyJson }: AllureBddContext) => {
- await attachPrettyJson('Pretty JSON attachment', {
+ const payload = await allureStep('Given a JSON object with diagnostics', async () => ({
patch_id: 'patch-001',
diagnostics: { ok: true },
+ }));
+
+ await allureStep('When attachPrettyJson is called', async () => {
+ await attachPrettyJson('Pretty JSON attachment', payload);
});
- expect(attachments).toHaveLength(1);
- expect(attachments[0]?.contentType).toBe('text/html');
- const html = String(attachments[0]?.content ?? '');
- expect(html).toContain('allure-auto-size-root');
- expect(html).toContain('json-source');
- expect(html).toContain('patch-001');
+ await allureStep('Then the attachment is formatted HTML with JSON content', async () => {
+ expect(attachments).toHaveLength(1);
+ expect(attachments[0]?.contentType).toBe('text/html');
+ const html = String(attachments[0]?.content ?? '');
+ expect(html).toContain('allure-auto-size-root');
+ expect(html).toContain('json-source');
+ expect(html).toContain('patch-001');
+ });
});
test
.openspec('debug JSON final-step label remains neutral')
('Scenario: debug JSON final-step label remains neutral', async ({ attachJsonLastStep }: AllureBddContext) => {
- await attachJsonLastStep({
+ const debugPayload = await allureStep('Given a debug payload with context and result', async () => ({
context: { action: 'validate' },
result: { ok: true },
attachAsStep: true,
+ }));
+
+ await allureStep('When attachJsonLastStep is called', async () => {
+ await attachJsonLastStep(debugPayload);
});
- expect(stepNames).toContain('Attach debug JSON (context + result)');
- expect(stepNames.every((name) => !name.startsWith('AND:'))).toBe(true);
- expect(attachments).toHaveLength(2);
+ await allureStep('Then the step label is neutral and attachments are created', async () => {
+ expect(stepNames).toContain('Attach debug JSON (context + result)');
+ expect(stepNames.every((name) => !name.startsWith('AND:'))).toBe(true);
+ expect(attachments).toHaveLength(2);
+ });
});
test
.openspec('short HTML attachment auto-fits without vertical scrollbar')
('Scenario: short HTML attachment auto-fits without vertical scrollbar', async () => {
- const runtimeTemplate = readFileSync(
- new URL('../../../../scripts/branding/runtime.template.js', import.meta.url),
- 'utf-8',
- );
- const themeCss = readFileSync(
- new URL('../../../../scripts/branding/theme.template.css', import.meta.url),
- 'utf-8',
- );
+ const { runtimeTemplate, themeCss } = await allureStep('Given the branding template files', async () => {
+ const runtimeTemplate = readFileSync(
+ new URL('../../../../scripts/branding/runtime.template.js', import.meta.url),
+ 'utf-8',
+ );
+ const themeCss = readFileSync(
+ new URL('../../../../scripts/branding/theme.template.css', import.meta.url),
+ 'utf-8',
+ );
+ return { runtimeTemplate, themeCss };
+ });
- expect(runtimeTemplate).toContain("overflowNeeded ? 'auto' : 'hidden'");
- expect(runtimeTemplate).toContain('contentTarget = Math.max(min, contentHeight + 8)');
- expect(themeCss).toContain('max-height: 72vh');
- expect(themeCss).toContain('overflow-y: hidden');
+ await allureStep('When inspecting the runtime resize logic', async () => {
+ expect(runtimeTemplate).toContain("overflowNeeded ? 'auto' : 'hidden'");
+ expect(runtimeTemplate).toContain('contentTarget = Math.max(min, contentHeight + 8)');
+ });
+
+ await allureStep('Then short content hides the scrollbar by default', async () => {
+ expect(themeCss).toContain('max-height: 72vh');
+ expect(themeCss).toContain('overflow-y: hidden');
+ });
});
test
.openspec('tall HTML attachment uses single vertical scrollbar')
('Scenario: tall HTML attachment uses single vertical scrollbar', async () => {
- const runtimeTemplate = readFileSync(
- new URL('../../../../scripts/branding/runtime.template.js', import.meta.url),
- 'utf-8',
+ const runtimeTemplate = await allureStep('Given the runtime template', async () =>
+ readFileSync(
+ new URL('../../../../scripts/branding/runtime.template.js', import.meta.url),
+ 'utf-8',
+ ),
);
- expect(runtimeTemplate).toContain("preview.style.setProperty('overflow-y', overflowNeeded ? 'auto' : 'hidden', 'important')");
- expect(runtimeTemplate).toContain("preview.style.setProperty('overflow-x', 'hidden', 'important')");
- expect(runtimeTemplate).toContain("frame.setAttribute('scrolling', 'no')");
+ await allureStep('When the content overflows', async () => {
+ expect(runtimeTemplate).toContain("preview.style.setProperty('overflow-y', overflowNeeded ? 'auto' : 'hidden', 'important')");
+ });
+
+ await allureStep('Then only a single vertical scrollbar is used', async () => {
+ expect(runtimeTemplate).toContain("preview.style.setProperty('overflow-x', 'hidden', 'important')");
+ expect(runtimeTemplate).toContain("frame.setAttribute('scrolling', 'no')");
+ });
});
});
diff --git a/packages/docx-mcp/src/tools/add_apply_plan_and_style_source.test.ts b/packages/docx-mcp/src/tools/add_apply_plan_and_style_source.test.ts
index 1afbc9b..a241de3 100644
--- a/packages/docx-mcp/src/tools/add_apply_plan_and_style_source.test.ts
+++ b/packages/docx-mcp/src/tools/add_apply_plan_and_style_source.test.ts
@@ -3,7 +3,7 @@ import path from 'node:path';
import { describe, expect } from 'vitest';
import { MCP_TOOLS } from '../server.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import {
assertFailure,
assertSuccess,
@@ -29,416 +29,492 @@ describe('Traceability: apply_plan + style_source_id', () => {
humanReadableTest.openspec('successful apply executes all steps')(
'Scenario: successful apply executes all steps',
async () => {
- const opened = await openSession(['Hello world', 'Second paragraph']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: opened.paraIds[0],
- old_string: 'Hello world',
- new_string: 'Hello earth',
- instruction: 'replace first paragraph text',
- },
- {
- step_id: 's2',
- operation: 'replace_text',
- target_paragraph_id: opened.paraIds[1],
- old_string: 'Second paragraph',
- new_string: 'Updated paragraph',
- instruction: 'replace second paragraph text',
- },
- ],
+ const result = await allureStep('Given a session with two paragraphs and a two-step replace plan', async () => {
+ const opened = await openSession(['Hello world', 'Second paragraph']);
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: opened.paraIds[0],
+ old_string: 'Hello world',
+ new_string: 'Hello earth',
+ instruction: 'replace first paragraph text',
+ },
+ {
+ step_id: 's2',
+ operation: 'replace_text',
+ target_paragraph_id: opened.paraIds[1],
+ old_string: 'Second paragraph',
+ new_string: 'Updated paragraph',
+ instruction: 'replace second paragraph text',
+ },
+ ],
+ });
});
- assertSuccess(result);
- expect(result.completed_count).toBe(2);
- expect(result.completed_step_ids).toEqual(['s1', 's2']);
+ await allureStep('Then both steps complete successfully', () => {
+ assertSuccess(result);
+ expect(result.completed_count).toBe(2);
+ expect(result.completed_step_ids).toEqual(['s1', 's2']);
+ });
},
);
humanReadableTest.openspec('validation failure returns all errors without applying')(
'Scenario: validation failure returns all errors without applying',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: '_bk_missing_1',
- old_string: 'Hello',
- new_string: 'Hi',
- instruction: 'invalid replacement target',
- },
- {
- step_id: 's2',
- operation: 'insert_paragraph',
- positional_anchor_node_id: '_bk_missing_2',
- new_string: 'Inserted paragraph',
- instruction: 'invalid insert anchor',
- },
- ],
+ const { opened, result } = await allureStep('Given a plan with two steps targeting missing paragraphs', async () => {
+ const opened = await openSession(['Hello world']);
+ const result = await applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: '_bk_missing_1',
+ old_string: 'Hello',
+ new_string: 'Hi',
+ instruction: 'invalid replacement target',
+ },
+ {
+ step_id: 's2',
+ operation: 'insert_paragraph',
+ positional_anchor_node_id: '_bk_missing_2',
+ new_string: 'Inserted paragraph',
+ instruction: 'invalid insert anchor',
+ },
+ ],
+ });
+ return { opened, result };
});
- assertFailure(result, 'VALIDATION_FAILED');
- const steps = (result as { steps?: Array<{ step_id: string; valid: boolean; errors: string[] }> }).steps;
- expect(steps).toHaveLength(2);
- expect(steps?.[0]?.valid).toBe(false);
- expect(steps?.[1]?.valid).toBe(false);
+ await allureStep('Then validation fails and both steps are marked invalid', () => {
+ assertFailure(result, 'VALIDATION_FAILED');
+ const steps = (result as { steps?: Array<{ step_id: string; valid: boolean; errors: string[] }> }).steps;
+ expect(steps).toHaveLength(2);
+ expect(steps?.[0]?.valid).toBe(false);
+ expect(steps?.[1]?.valid).toBe(false);
+ });
- const read = await readFile(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(read);
- expect(String(read.content)).toContain('Hello world');
- expect(String(read.content)).not.toContain('Inserted paragraph');
+ await allureStep('Then the document content is unchanged', async () => {
+ const read = await readFile(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(read);
+ expect(String(read.content)).toContain('Hello world');
+ expect(String(read.content)).not.toContain('Inserted paragraph');
+ });
},
);
humanReadableTest.openspec('partial apply failure stops on first error')(
'Scenario: partial apply failure stops on first error',
async () => {
- const opened = await openSession(['Hello world', 'Second paragraph']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: opened.paraIds[0],
- old_string: 'Hello world',
- new_string: 'Hello earth',
- instruction: 'first edit succeeds',
- },
- {
- step_id: 's2',
- operation: 'replace_text',
- target_paragraph_id: opened.paraIds[0],
- old_string: 'Hello world',
- new_string: 'Should fail on execution',
- instruction: 'fails after step 1 changes text',
- },
- {
- step_id: 's3',
- operation: 'replace_text',
- target_paragraph_id: opened.paraIds[1],
- old_string: 'Second paragraph',
- new_string: 'Should not run',
- instruction: 'must not execute',
- },
- ],
+ const { opened, result } = await allureStep('Given a three-step plan where step 2 targets stale text', async () => {
+ const opened = await openSession(['Hello world', 'Second paragraph']);
+ const result = await applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: opened.paraIds[0],
+ old_string: 'Hello world',
+ new_string: 'Hello earth',
+ instruction: 'first edit succeeds',
+ },
+ {
+ step_id: 's2',
+ operation: 'replace_text',
+ target_paragraph_id: opened.paraIds[0],
+ old_string: 'Hello world',
+ new_string: 'Should fail on execution',
+ instruction: 'fails after step 1 changes text',
+ },
+ {
+ step_id: 's3',
+ operation: 'replace_text',
+ target_paragraph_id: opened.paraIds[1],
+ old_string: 'Second paragraph',
+ new_string: 'Should not run',
+ instruction: 'must not execute',
+ },
+ ],
+ });
+ return { opened, result };
});
- assertFailure(result, 'APPLY_PARTIAL_FAILURE');
- expect(result.completed_count).toBe(1);
- expect(result.completed_step_ids).toEqual(['s1']);
- expect(result.failed_step_id).toBe('s2');
- expect(result.failed_step_index).toBe(1);
+ await allureStep('Then apply stops at step 2 with partial failure', () => {
+ assertFailure(result, 'APPLY_PARTIAL_FAILURE');
+ expect(result.completed_count).toBe(1);
+ expect(result.completed_step_ids).toEqual(['s1']);
+ expect(result.failed_step_id).toBe('s2');
+ expect(result.failed_step_index).toBe(1);
+ });
- const read = await readFile(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(read);
- expect(String(read.content)).toContain('Hello earth');
- expect(String(read.content)).not.toContain('Should not run');
+ await allureStep('Then only step 1 changes are present in the document', async () => {
+ const read = await readFile(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(read);
+ expect(String(read.content)).toContain('Hello earth');
+ expect(String(read.content)).not.toContain('Should not run');
+ });
},
);
humanReadableTest.openspec('step normalization accepts raw format')(
'Scenario: step normalization accepts raw format',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: opened.firstParaId,
- old_string: 'Hello world',
- new_string: 'Hello raw format',
- instruction: 'raw format step',
- },
- ],
+ const result = await allureStep('Given a plan with a raw-format step', async () => {
+ const opened = await openSession(['Hello world']);
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: opened.firstParaId,
+ old_string: 'Hello world',
+ new_string: 'Hello raw format',
+ instruction: 'raw format step',
+ },
+ ],
+ });
});
- assertSuccess(result);
- expect(result.completed_count).toBe(1);
- expect(result.completed_step_ids).toEqual(['s1']);
+ await allureStep('Then the raw-format step completes successfully', () => {
+ assertSuccess(result);
+ expect(result.completed_count).toBe(1);
+ expect(result.completed_step_ids).toEqual(['s1']);
+ });
},
);
humanReadableTest.openspec('step normalization accepts merged format')(
'Scenario: step normalization accepts merged format',
async () => {
- const opened = await openSession(['Hello world']);
- const merged = await mergePlans({
- plans: [
- {
- plan_id: 'plan-a',
- steps: [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: opened.firstParaId,
- old_string: 'Hello world',
- new_string: 'Hello merged format',
- instruction: 'merged format step',
- range: { start: 0, end: 11 },
- },
- ],
- },
- ],
+ const result = await allureStep('Given a plan whose steps come from mergePlans output', async () => {
+ const opened = await openSession(['Hello world']);
+ const merged = await mergePlans({
+ plans: [
+ {
+ plan_id: 'plan-a',
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: opened.firstParaId,
+ old_string: 'Hello world',
+ new_string: 'Hello merged format',
+ instruction: 'merged format step',
+ range: { start: 0, end: 11 },
+ },
+ ],
+ },
+ ],
+ });
+ assertSuccess(merged);
+
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: (merged.merged_plan as { steps: unknown[] }).steps,
+ });
});
- assertSuccess(merged);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: (merged.merged_plan as { steps: unknown[] }).steps,
+ await allureStep('Then the merged-format step completes successfully', () => {
+ assertSuccess(result);
+ expect(result.completed_count).toBe(1);
+ expect(result.completed_step_ids).toEqual(['s1']);
});
-
- assertSuccess(result);
- expect(result.completed_count).toBe(1);
- expect(result.completed_step_ids).toEqual(['s1']);
},
);
humanReadableTest.openspec('__proto__ in step fields is rejected')(
'Scenario: __proto__ in step fields is rejected',
async () => {
- const opened = await openSession(['Hello world']);
- const steps = [{ step_id: 's1', operation: 'replace_text', __proto__: {} }];
- const rawSteps = JSON.parse(JSON.stringify(steps));
- Object.defineProperty(rawSteps[0], '__proto__', {
- value: { polluted: true },
- enumerable: true,
- configurable: true,
- writable: true,
+ const result = await allureStep('Given a plan step containing a __proto__ field', async () => {
+ const opened = await openSession(['Hello world']);
+ const steps = [{ step_id: 's1', operation: 'replace_text', __proto__: {} }];
+ const rawSteps = JSON.parse(JSON.stringify(steps));
+ Object.defineProperty(rawSteps[0], '__proto__', {
+ value: { polluted: true },
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ });
+
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: rawSteps,
+ });
});
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: rawSteps,
+ await allureStep('Then normalization rejects the step with a __proto__ error', () => {
+ assertFailure(result, 'NORMALIZATION_ERROR');
+ expect(String(result.error?.message ?? '')).toContain('__proto__');
});
-
- assertFailure(result, 'NORMALIZATION_ERROR');
- expect(String(result.error?.message ?? '')).toContain('__proto__');
},
);
humanReadableTest.openspec('plan steps loaded from file path')(
'Scenario: plan steps loaded from file path',
async () => {
- const opened = await openSession(['Hello world']);
- const tmpDir = await createTrackedTempDir('apply-plan-file-');
- const planPath = path.join(tmpDir, 'plan.json');
-
- const fileSteps = [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: opened.firstParaId,
- old_string: 'Hello world',
- new_string: 'Hello from plan file',
- instruction: 'load plan from file',
- },
- ];
- await fs.writeFile(planPath, JSON.stringify(fileSteps), 'utf-8');
-
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- plan_file_path: planPath,
- });
-
- assertSuccess(result);
- expect(result.completed_count).toBe(1);
- expect(result.completed_step_ids).toEqual(['s1']);
+ const result = await allureStep('Given a plan written to a JSON file and applied via plan_file_path', async () => {
+ const opened = await openSession(['Hello world']);
+ const tmpDir = await createTrackedTempDir('apply-plan-file-');
+ const planPath = path.join(tmpDir, 'plan.json');
+
+ const fileSteps = [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: opened.firstParaId,
+ old_string: 'Hello world',
+ new_string: 'Hello from plan file',
+ instruction: 'load plan from file',
+ },
+ ];
+ await fs.writeFile(planPath, JSON.stringify(fileSteps), 'utf-8');
+
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ plan_file_path: planPath,
+ });
+ });
+
+ await allureStep('Then the file-loaded plan step completes successfully', () => {
+ assertSuccess(result);
+ expect(result.completed_count).toBe(1);
+ expect(result.completed_step_ids).toEqual(['s1']);
+ });
},
);
humanReadableTest.openspec('error when both steps and plan_file_path supplied')(
'Scenario: error when both steps and plan_file_path supplied',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [{ step_id: 's1', operation: 'replace_text' }],
- plan_file_path: '/tmp/plan.json',
+ const result = await allureStep('Given a plan with both steps and plan_file_path supplied', async () => {
+ const opened = await openSession(['Hello world']);
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [{ step_id: 's1', operation: 'replace_text' }],
+ plan_file_path: '/tmp/plan.json',
+ });
});
- assertFailure(result, 'INVALID_PARAMS');
+ await allureStep('Then INVALID_PARAMS error is returned', () => {
+ assertFailure(result, 'INVALID_PARAMS');
+ });
},
);
humanReadableTest.openspec('unsupported operation is rejected during validation')(
'Scenario: unsupported operation is rejected during validation',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'delete_paragraph',
- target_paragraph_id: opened.firstParaId,
- instruction: 'unsupported operation',
- },
- ],
+ const result = await allureStep('Given a plan step with an unsupported operation type', async () => {
+ const opened = await openSession(['Hello world']);
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'delete_paragraph',
+ target_paragraph_id: opened.firstParaId,
+ instruction: 'unsupported operation',
+ },
+ ],
+ });
});
- assertFailure(result, 'NORMALIZATION_ERROR');
- expect(String(result.error?.message ?? '')).toContain('unsupported operation');
+ await allureStep('Then normalization rejects the unsupported operation', () => {
+ assertFailure(result, 'NORMALIZATION_ERROR');
+ expect(String(result.error?.message ?? '')).toContain('unsupported operation');
+ });
},
);
humanReadableTest.openspec('legacy aliases rejected during validation')(
'Scenario: legacy aliases rejected during validation',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'smart_edit',
- target_paragraph_id: opened.firstParaId,
- old_string: 'Hello',
- new_string: 'Hi',
- instruction: 'legacy alias',
- },
- ],
+ const result = await allureStep('Given a plan step using legacy smart_edit operation', async () => {
+ const opened = await openSession(['Hello world']);
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'smart_edit',
+ target_paragraph_id: opened.firstParaId,
+ old_string: 'Hello',
+ new_string: 'Hi',
+ instruction: 'legacy alias',
+ },
+ ],
+ });
});
- assertFailure(result, 'NORMALIZATION_ERROR');
- expect(String(result.error?.message ?? '')).toContain('legacy operation');
+ await allureStep('Then normalization rejects the legacy operation', () => {
+ assertFailure(result, 'NORMALIZATION_ERROR');
+ expect(String(result.error?.message ?? '')).toContain('legacy operation');
+ });
},
);
humanReadableTest.openspec('style_source_id clones formatting from specified paragraph')(
'Scenario: style_source_id clones formatting from specified paragraph',
async () => {
- const xml =
- `` +
- `` +
- `` +
- `Heading` +
- `Body paragraph` +
- ``;
-
- const opened = await openSession([], { xml });
- const result = await insertParagraph(opened.mgr, {
- session_id: opened.sessionId,
- positional_anchor_node_id: opened.paraIds[0],
- position: 'AFTER',
- new_string: 'Inserted with body style source',
- instruction: 'insert after heading',
- style_source_id: opened.paraIds[1],
- });
-
- assertSuccess(result);
- expect(result.style_source_warning).toBeUndefined();
+ const result = await allureStep('Given a document with Heading1 and Normal paragraphs, inserting after heading with body style_source_id', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `Heading` +
+ `Body paragraph` +
+ ``;
+
+ const opened = await openSession([], { xml });
+ return insertParagraph(opened.mgr, {
+ session_id: opened.sessionId,
+ positional_anchor_node_id: opened.paraIds[0],
+ position: 'AFTER',
+ new_string: 'Inserted with body style source',
+ instruction: 'insert after heading',
+ style_source_id: opened.paraIds[1],
+ });
+ });
+
+ await allureStep('Then the insert succeeds without style warnings', () => {
+ assertSuccess(result);
+ expect(result.style_source_warning).toBeUndefined();
+ });
},
);
humanReadableTest.openspec('style_source_id falls back to anchor with warning')(
'Scenario: style_source_id falls back to anchor with warning',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await insertParagraph(opened.mgr, {
- session_id: opened.sessionId,
- positional_anchor_node_id: opened.firstParaId,
- position: 'AFTER',
- new_string: 'Inserted with fallback',
- instruction: 'insert with missing style source',
- style_source_id: '_bk_missing_style_source',
- });
-
- assertSuccess(result);
- expect(String(result.style_source_warning ?? '')).toContain('not found');
- expect(String(result.style_source_warning ?? '')).toContain('fell back');
+ const result = await allureStep('Given an insert with a non-existent style_source_id', async () => {
+ const opened = await openSession(['Hello world']);
+ return insertParagraph(opened.mgr, {
+ session_id: opened.sessionId,
+ positional_anchor_node_id: opened.firstParaId,
+ position: 'AFTER',
+ new_string: 'Inserted with fallback',
+ instruction: 'insert with missing style source',
+ style_source_id: '_bk_missing_style_source',
+ });
+ });
+
+ await allureStep('Then the insert succeeds with a fallback warning', () => {
+ assertSuccess(result);
+ expect(String(result.style_source_warning ?? '')).toContain('not found');
+ expect(String(result.style_source_warning ?? '')).toContain('fell back');
+ });
},
);
humanReadableTest.openspec('style_source_id omitted uses anchor formatting (backward compatible)')(
'Scenario: style_source_id omitted uses anchor formatting (backward compatible)',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await insertParagraph(opened.mgr, {
- session_id: opened.sessionId,
- positional_anchor_node_id: opened.firstParaId,
- position: 'AFTER',
- new_string: 'Inserted without style source',
- instruction: 'insert with anchor style',
+ const result = await allureStep('Given an insert without style_source_id', async () => {
+ const opened = await openSession(['Hello world']);
+ return insertParagraph(opened.mgr, {
+ session_id: opened.sessionId,
+ positional_anchor_node_id: opened.firstParaId,
+ position: 'AFTER',
+ new_string: 'Inserted without style source',
+ instruction: 'insert with anchor style',
+ });
});
- assertSuccess(result);
- expect(result.style_source_warning).toBeUndefined();
+ await allureStep('Then the insert succeeds using anchor formatting with no warning', () => {
+ assertSuccess(result);
+ expect(result.style_source_warning).toBeUndefined();
+ });
},
);
humanReadableTest.openspec('canonical names are advertised')(
'Scenario: canonical names are advertised',
async () => {
- const toolNames = new Set(MCP_TOOLS.map((tool) => tool.name));
- expect(toolNames.has('replace_text')).toBe(true);
- expect(toolNames.has('insert_paragraph')).toBe(true);
+ const toolNames = await allureStep('Given the MCP_TOOLS list', () => {
+ return new Set(MCP_TOOLS.map((tool) => tool.name));
+ });
+
+ await allureStep('Then canonical tool names replace_text and insert_paragraph are present', () => {
+ expect(toolNames.has('replace_text')).toBe(true);
+ expect(toolNames.has('insert_paragraph')).toBe(true);
+ });
},
);
humanReadableTest.openspec('legacy aliases are unavailable')(
'Scenario: legacy aliases are unavailable',
async () => {
- const toolNames = new Set(MCP_TOOLS.map((tool) => tool.name));
- expect(toolNames.has('smart_edit')).toBe(false);
- expect(toolNames.has('smart_insert')).toBe(false);
+ const toolNames = await allureStep('Given the MCP_TOOLS list', () => {
+ return new Set(MCP_TOOLS.map((tool) => tool.name));
+ });
+
+ await allureStep('Then legacy aliases smart_edit and smart_insert are absent', () => {
+ expect(toolNames.has('smart_edit')).toBe(false);
+ expect(toolNames.has('smart_insert')).toBe(false);
+ });
},
);
humanReadableTest.openspec('legacy aliases are rejected inside plan operations')(
'Scenario: legacy aliases are rejected inside plan operations',
async () => {
- const result = await mergePlans({
- plans: [
- {
- plan_id: 'legacy-edit',
- steps: [
- {
- step_id: 's1',
- operation: 'smart_edit',
- target_paragraph_id: '_bk_1',
- old_string: 'old',
- new_string: 'new',
- instruction: 'legacy alias',
- },
- ],
- },
- ],
+ const result = await allureStep('Given a merge plan containing a legacy smart_edit operation', async () => {
+ return mergePlans({
+ plans: [
+ {
+ plan_id: 'legacy-edit',
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'smart_edit',
+ target_paragraph_id: '_bk_1',
+ old_string: 'old',
+ new_string: 'new',
+ instruction: 'legacy alias',
+ },
+ ],
+ },
+ ],
+ });
});
- assertFailure(result);
- const conflicts = (result as { conflicts?: Array<{ code: string }> }).conflicts ?? [];
- expect(conflicts.some((conflict) => conflict.code === 'INVALID_STEP_OPERATION')).toBe(true);
+ await allureStep('Then merge fails with INVALID_STEP_OPERATION conflict', () => {
+ assertFailure(result);
+ const conflicts = (result as { conflicts?: Array<{ code: string }> }).conflicts ?? [];
+ expect(conflicts.some((conflict) => conflict.code === 'INVALID_STEP_OPERATION')).toBe(true);
+ });
},
);
humanReadableTest.openspec('legacy aliases are rejected inside apply_plan steps')(
'Scenario: legacy aliases are rejected inside apply_plan steps',
async () => {
- const opened = await openSession(['Hello world']);
- const result = await applyPlan(opened.mgr, {
- session_id: opened.sessionId,
- steps: [
- {
- step_id: 's1',
- operation: 'smart_insert',
- positional_anchor_node_id: opened.firstParaId,
- new_string: 'Legacy alias insert',
- instruction: 'legacy alias',
- },
- ],
+ const result = await allureStep('Given a plan step using legacy smart_insert operation', async () => {
+ const opened = await openSession(['Hello world']);
+ return applyPlan(opened.mgr, {
+ session_id: opened.sessionId,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'smart_insert',
+ positional_anchor_node_id: opened.firstParaId,
+ new_string: 'Legacy alias insert',
+ instruction: 'legacy alias',
+ },
+ ],
+ });
});
- assertFailure(result, 'NORMALIZATION_ERROR');
- expect(String(result.error?.message ?? '')).toContain('legacy operation');
+ await allureStep('Then normalization rejects the legacy operation', () => {
+ assertFailure(result, 'NORMALIZATION_ERROR');
+ expect(String(result.error?.message ?? '')).toContain('legacy operation');
+ });
},
);
});
diff --git a/packages/docx-mcp/src/tools/add_footnote_support.test.ts b/packages/docx-mcp/src/tools/add_footnote_support.test.ts
index d25a49e..5e35e0e 100644
--- a/packages/docx-mcp/src/tools/add_footnote_support.test.ts
+++ b/packages/docx-mcp/src/tools/add_footnote_support.test.ts
@@ -1,5 +1,5 @@
import { describe, expect } from 'vitest';
-import { testAllure, allureJsonAttachment } from '../testing/allure-test.js';
+import { testAllure, allureJsonAttachment, allureStep } from '../testing/allure-test.js';
import {
assertFailure,
assertSuccess,
@@ -24,245 +24,300 @@ describe('OpenSpec traceability: add-footnote-support', () => {
registerCleanup();
humanReadableTest.openspec('read all footnotes')('Scenario: read all footnotes', async () => {
- const opened = await openSession([
- 'Alpha paragraph for note one.',
- 'Beta paragraph for note two.',
- ]);
-
- const addOne = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.paraIds[0]!,
- text: 'First footnote',
+ const { opened, listed } = await allureStep('Given a document with two footnotes added to two paragraphs', async () => {
+ const opened = await openSession([
+ 'Alpha paragraph for note one.',
+ 'Beta paragraph for note two.',
+ ]);
+
+ const addOne = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.paraIds[0]!,
+ text: 'First footnote',
+ });
+ assertSuccess(addOne, 'add_footnote #1');
+
+ const addTwo = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.paraIds[1]!,
+ text: 'Second footnote',
+ });
+ assertSuccess(addTwo, 'add_footnote #2');
+
+ const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(listed, 'get_footnotes');
+ await allureJsonAttachment('get_footnotes-response', listed);
+
+ return { opened, listed };
});
- assertSuccess(addOne, 'add_footnote #1');
- const addTwo = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.paraIds[1]!,
- text: 'Second footnote',
+ await allureStep('Then both footnotes are returned with correct metadata', () => {
+ const notes = listed.footnotes as Array<Record<string, unknown>>;
+ expect(notes).toHaveLength(2);
+ expect(notes[0]).toEqual(expect.objectContaining({
+ id: expect.any(Number),
+ display_number: 1,
+ text: expect.any(String),
+ anchored_paragraph_id: opened.paraIds[0],
+ }));
+ expect(notes[1]).toEqual(expect.objectContaining({
+ id: expect.any(Number),
+ display_number: 2,
+ text: expect.any(String),
+ anchored_paragraph_id: opened.paraIds[1],
+ }));
});
- assertSuccess(addTwo, 'add_footnote #2');
-
- const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(listed, 'get_footnotes');
- await allureJsonAttachment('get_footnotes-response', listed);
-
- const notes = listed.footnotes as Array<Record<string, unknown>>;
- expect(notes).toHaveLength(2);
- expect(notes[0]).toEqual(expect.objectContaining({
- id: expect.any(Number),
- display_number: 1,
- text: expect.any(String),
- anchored_paragraph_id: opened.paraIds[0],
- }));
- expect(notes[1]).toEqual(expect.objectContaining({
- id: expect.any(Number),
- display_number: 2,
- text: expect.any(String),
- anchored_paragraph_id: opened.paraIds[1],
- }));
});
humanReadableTest.openspec('empty document returns empty array')(
'Scenario: empty document returns empty array',
async () => {
- const opened = await openSession(['No footnotes in this paragraph.']);
+ const listed = await allureStep('Given a document with no footnotes', async () => {
+ const opened = await openSession(['No footnotes in this paragraph.']);
+ const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(listed, 'get_footnotes');
+ return listed;
+ });
- const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(listed, 'get_footnotes');
- expect(listed.footnotes).toEqual([]);
+ await allureStep('Then get_footnotes returns an empty array', () => {
+ expect(listed.footnotes).toEqual([]);
+ });
},
);
humanReadableTest.openspec('add footnote successfully')('Scenario: add footnote successfully', async () => {
- const opened = await openSession(['Contract text paragraph.']);
-
- const result = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- text: 'Tool-level note',
+ const { opened, result } = await allureStep('Given a session with a footnote added to a paragraph', async () => {
+ const opened = await openSession(['Contract text paragraph.']);
+ const result = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ text: 'Tool-level note',
+ });
+ return { opened, result };
});
- assertSuccess(result, 'add_footnote');
- expect(result.note_id).toBeTypeOf('number');
- expect(result.session_id).toBe(opened.sessionId);
+ await allureStep('Then add_footnote succeeds and returns a numeric note_id', () => {
+ assertSuccess(result, 'add_footnote');
+ expect(result.note_id).toBeTypeOf('number');
+ expect(result.session_id).toBe(opened.sessionId);
+ });
});
humanReadableTest.openspec('error when anchor paragraph not found')(
'Scenario: error when anchor paragraph not found',
async () => {
- const opened = await openSession(['Anchor validation.']);
-
- const result = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: '_bk_missing',
- text: 'Should fail',
+ const result = await allureStep('Given an add_footnote call targeting a non-existent paragraph', async () => {
+ const opened = await openSession(['Anchor validation.']);
+ return addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: '_bk_missing',
+ text: 'Should fail',
+ });
});
- assertFailure(result, 'ANCHOR_NOT_FOUND', 'add_footnote');
+ await allureStep('Then the result is an ANCHOR_NOT_FOUND error', () => {
+ assertFailure(result, 'ANCHOR_NOT_FOUND', 'add_footnote');
+ });
},
);
humanReadableTest.openspec('error when after_text not found')(
'Scenario: error when after_text not found',
async () => {
- const opened = await openSession(['Anchor text present here.']);
-
- const result = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- after_text: 'Not in paragraph',
- text: 'Should fail',
+ const result = await allureStep('Given an add_footnote call with after_text that does not exist in the paragraph', async () => {
+ const opened = await openSession(['Anchor text present here.']);
+ return addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ after_text: 'Not in paragraph',
+ text: 'Should fail',
+ });
});
- assertFailure(result, 'TEXT_NOT_FOUND', 'add_footnote');
+ await allureStep('Then the result is a TEXT_NOT_FOUND error', () => {
+ assertFailure(result, 'TEXT_NOT_FOUND', 'add_footnote');
+ });
},
);
humanReadableTest.openspec('update footnote successfully')(
'Scenario: update footnote successfully',
async () => {
- const opened = await openSession(['Update flow paragraph.']);
- const created = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- text: 'Old note body',
+ const listed = await allureStep('Given a footnote is created and then updated with new text', async () => {
+ const opened = await openSession(['Update flow paragraph.']);
+ const created = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ text: 'Old note body',
+ });
+ assertSuccess(created, 'add_footnote');
+
+ const updated = await updateFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ note_id: created.note_id as number,
+ new_text: 'Updated note body',
+ });
+ assertSuccess(updated, 'update_footnote');
+
+ const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(listed, 'get_footnotes');
+ return listed;
});
- assertSuccess(created, 'add_footnote');
- const updated = await updateFootnote(opened.mgr, {
- session_id: opened.sessionId,
- note_id: created.note_id as number,
- new_text: 'Updated note body',
+ await allureStep('Then the footnote text reflects the update', () => {
+ const first = (listed.footnotes as Array<Record<string, unknown>>)[0]!;
+ expect(String(first.text)).toContain('Updated note body');
});
- assertSuccess(updated, 'update_footnote');
-
- const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(listed, 'get_footnotes');
- const first = (listed.footnotes as Array<Record<string, unknown>>)[0]!;
- expect(String(first.text)).toContain('Updated note body');
},
);
humanReadableTest.openspec('error when note not found')('Scenario: error when note not found', async () => {
- const opened = await openSession(['Missing-note validation paragraph.']);
-
- const result = await updateFootnote(opened.mgr, {
- session_id: opened.sessionId,
- note_id: 999999,
- new_text: 'No-op',
+ const result = await allureStep('Given an update_footnote call with a non-existent note_id', async () => {
+ const opened = await openSession(['Missing-note validation paragraph.']);
+ return updateFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ note_id: 999999,
+ new_text: 'No-op',
+ });
});
- assertFailure(result, 'NOTE_NOT_FOUND', 'update_footnote');
+ await allureStep('Then the result is a NOTE_NOT_FOUND error', () => {
+ assertFailure(result, 'NOTE_NOT_FOUND', 'update_footnote');
+ });
});
humanReadableTest.openspec('delete footnote successfully')(
'Scenario: delete footnote successfully',
async () => {
- const opened = await openSession(['Delete flow paragraph.']);
- const created = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- text: 'Delete me',
+ const listed = await allureStep('Given a footnote is created and then deleted', async () => {
+ const opened = await openSession(['Delete flow paragraph.']);
+ const created = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ text: 'Delete me',
+ });
+ assertSuccess(created, 'add_footnote');
+
+ const deleted = await deleteFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ note_id: created.note_id as number,
+ });
+ assertSuccess(deleted, 'delete_footnote');
+
+ const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(listed, 'get_footnotes');
+ return listed;
});
- assertSuccess(created, 'add_footnote');
- const deleted = await deleteFootnote(opened.mgr, {
- session_id: opened.sessionId,
- note_id: created.note_id as number,
+ await allureStep('Then get_footnotes returns an empty array', () => {
+ expect(listed.footnotes).toEqual([]);
});
- assertSuccess(deleted, 'delete_footnote');
-
- const listed = await getFootnotes(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(listed, 'get_footnotes');
- expect(listed.footnotes).toEqual([]);
},
);
humanReadableTest.openspec('error when note not found')(
'Scenario: error when note not found',
async () => {
- const opened = await openSession(['Delete-missing validation paragraph.']);
-
- const result = await deleteFootnote(opened.mgr, {
- session_id: opened.sessionId,
- note_id: 123456,
+ const result = await allureStep('Given a delete_footnote call with a non-existent note_id', async () => {
+ const opened = await openSession(['Delete-missing validation paragraph.']);
+ return deleteFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ note_id: 123456,
+ });
});
- assertFailure(result, 'NOTE_NOT_FOUND', 'delete_footnote');
+ await allureStep('Then the result is a NOTE_NOT_FOUND error', () => {
+ assertFailure(result, 'NOTE_NOT_FOUND', 'delete_footnote');
+ });
},
);
humanReadableTest.openspec('error when deleting reserved type')(
'Scenario: error when deleting reserved type',
async () => {
- const opened = await openSession(['Reserved delete validation paragraph.']);
- const seeded = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- text: 'Seed real note to bootstrap footnotes.xml',
+ const result = await allureStep('Given a delete_footnote call targeting a reserved note_id (-1)', async () => {
+ const opened = await openSession(['Reserved delete validation paragraph.']);
+ const seeded = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ text: 'Seed real note to bootstrap footnotes.xml',
+ });
+ assertSuccess(seeded, 'add_footnote');
+
+ return deleteFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ note_id: -1,
+ });
});
- assertSuccess(seeded, 'add_footnote');
- const result = await deleteFootnote(opened.mgr, {
- session_id: opened.sessionId,
- note_id: -1,
+ await allureStep('Then the result is a RESERVED_TYPE error', () => {
+ assertFailure(result, 'RESERVED_TYPE', 'delete_footnote');
});
-
- assertFailure(result, 'RESERVED_TYPE', 'delete_footnote');
},
);
humanReadableTest.openspec('markers present in document view')(
'Scenario: markers present in document view',
async () => {
- const opened = await openSession(['Marker display paragraph.']);
- const created = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- text: 'Display marker note',
+ const content = await allureStep('Given a document with a footnote and its simple-format read output', async () => {
+ const opened = await openSession(['Marker display paragraph.']);
+ const created = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ text: 'Display marker note',
+ });
+ assertSuccess(created, 'add_footnote');
+
+ const read = await readFile(opened.mgr, {
+ session_id: opened.sessionId,
+ format: 'simple',
+ show_formatting: false,
+ });
+ assertSuccess(read, 'read_file');
+ return String(read.content);
});
- assertSuccess(created, 'add_footnote');
- const read = await readFile(opened.mgr, {
- session_id: opened.sessionId,
- format: 'simple',
- show_formatting: false,
+ await allureStep('Then the document view contains the footnote marker [^1]', () => {
+ expect(content).toContain('[^1]');
});
- assertSuccess(read, 'read_file');
- const content = String(read.content);
- expect(content).toContain('[^1]');
},
);
humanReadableTest.openspec('markers absent from edit matching')(
'Scenario: markers absent from edit matching',
async () => {
- const opened = await openSession(['Replace target sentence.']);
- const created = await addFootnote(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- text: 'Matching note',
+ const opened = await allureStep('Given a document with a footnote attached to a paragraph', async () => {
+ const opened = await openSession(['Replace target sentence.']);
+ const created = await addFootnote(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ text: 'Matching note',
+ });
+ assertSuccess(created, 'add_footnote');
+ return opened;
});
- assertSuccess(created, 'add_footnote');
- const withMarker = await replaceText(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- old_string: 'Replace target sentence.[^1]',
- new_string: 'Should not apply',
- instruction: 'Attempt replace using marker token',
+ await allureStep('Then replace_text using a marker token in old_string fails with TEXT_NOT_FOUND', async () => {
+ const withMarker = await replaceText(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ old_string: 'Replace target sentence.[^1]',
+ new_string: 'Should not apply',
+ instruction: 'Attempt replace using marker token',
+ });
+ assertFailure(withMarker, 'TEXT_NOT_FOUND', 'replace_text(marker token)');
});
- assertFailure(withMarker, 'TEXT_NOT_FOUND', 'replace_text(marker token)');
- const withoutMarker = await replaceText(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- old_string: 'Replace target sentence.',
- new_string: 'Replaced sentence.',
- instruction: 'Replace using raw paragraph text',
+ await allureStep('Then replace_text using raw paragraph text succeeds', async () => {
+ const withoutMarker = await replaceText(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ old_string: 'Replace target sentence.',
+ new_string: 'Replaced sentence.',
+ instruction: 'Replace using raw paragraph text',
+ });
+ assertSuccess(withoutMarker, 'replace_text(raw text)');
});
- assertSuccess(withoutMarker, 'replace_text(raw text)');
},
);
});
diff --git a/packages/docx-mcp/src/tools/add_multi_agent_plan_merge_phase_1.test.ts b/packages/docx-mcp/src/tools/add_multi_agent_plan_merge_phase_1.test.ts
index 89a8e60..d8a4c0e 100644
--- a/packages/docx-mcp/src/tools/add_multi_agent_plan_merge_phase_1.test.ts
+++ b/packages/docx-mcp/src/tools/add_multi_agent_plan_merge_phase_1.test.ts
@@ -4,7 +4,7 @@ import { describe, expect } from 'vitest';
import { initPlan } from './init_plan.js';
import { mergePlans } from './merge_plans.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import {
createTestSessionManager,
createTrackedTempDir,
@@ -60,24 +60,29 @@ describe('Traceability: Multi-Agent Plan Merge (Phase 1)', () => {
humanReadableTest.openspec('init_plan returns revision-bound context')(
'Scenario: init_plan returns revision-bound context',
async () => {
- const manager = createTestSessionManager();
- const filePath = await writeDocx(['Alpha paragraph']);
+ const { manager, result, filePath } = await allureStep('Given a session initialized with a plan', async () => {
+ const manager = createTestSessionManager();
+ const filePath = await writeDocx(['Alpha paragraph']);
- const result = await initPlan(manager, {
- file_path: filePath,
- plan_name: 'agreement-review',
- orchestrator_id: 'coordinator-1',
+ const result = await initPlan(manager, {
+ file_path: filePath,
+ plan_name: 'agreement-review',
+ orchestrator_id: 'coordinator-1',
+ });
+ return { manager, result, filePath };
});
- expect(result.success).toBe(true);
- if (!result.success) return;
- expect(result.plan_context_id).toMatch(/^plctx_[A-Za-z0-9]{12}$/);
- expect(result.base_revision).toBe(0);
- expect(typeof result.resolved_session_id).toBe('string');
- expect(result.resolved_file_path).toBe(manager.normalizePath(filePath));
- expect(result.plan_context).toMatchObject({
- plan_name: 'agreement-review',
- orchestrator_id: 'coordinator-1',
+ await allureStep('Then the result contains revision-bound context', async () => {
+ expect(result.success).toBe(true);
+ if (!result.success) return;
+ expect(result.plan_context_id).toMatch(/^plctx_[A-Za-z0-9]{12}$/);
+ expect(result.base_revision).toBe(0);
+ expect(typeof result.resolved_session_id).toBe('string');
+ expect(result.resolved_file_path).toBe(manager.normalizePath(filePath));
+ expect(result.plan_context).toMatchObject({
+ plan_name: 'agreement-review',
+ orchestrator_id: 'coordinator-1',
+ });
});
},
);
@@ -85,181 +90,217 @@ describe('Traceability: Multi-Agent Plan Merge (Phase 1)', () => {
humanReadableTest.openspec('init_plan uses file-first session resolution')(
'Scenario: init_plan uses file-first session resolution',
async () => {
- const manager = createTestSessionManager();
- const filePath = await writeDocx(['Beta paragraph'], 'beta.docx');
-
- const result = await initPlan(manager, { file_path: filePath });
+ const { manager, result, filePath } = await allureStep('Given a plan initialized with file_path only', async () => {
+ const manager = createTestSessionManager();
+ const filePath = await writeDocx(['Beta paragraph'], 'beta.docx');
+ const result = await initPlan(manager, { file_path: filePath });
+ return { manager, result, filePath };
+ });
- expect(result.success).toBe(true);
- if (!result.success) return;
- expect(result.session_resolution).toBe('opened_new_session');
- expect(typeof result.resolved_session_id).toBe('string');
- expect(result.resolved_file_path).toBe(manager.normalizePath(filePath));
+ await allureStep('Then the session is resolved via file-first strategy', async () => {
+ expect(result.success).toBe(true);
+ if (!result.success) return;
+ expect(result.session_resolution).toBe('opened_new_session');
+ expect(typeof result.resolved_session_id).toBe('string');
+ expect(result.resolved_file_path).toBe(manager.normalizePath(filePath));
+ });
},
);
humanReadableTest.openspec('merge_plans returns merged artifact when no conflicts')(
'Scenario: merge_plans returns merged artifact when no conflicts',
async () => {
- const result = await mergePlans({
- plans: [
- {
- plan_id: 'termination',
- base_revision: 4,
- steps: [replaceStep('s1', '_bk_1', 0, 10)],
- },
- {
- plan_id: 'governing-law',
- base_revision: 4,
- steps: [replaceStep('s2', '_bk_1', 12, 20), insertStep('s3', '_bk_9', 'AFTER')],
- },
- ],
+ const result = await allureStep('Given two non-conflicting plans are merged', async () => {
+ return mergePlans({
+ plans: [
+ {
+ plan_id: 'termination',
+ base_revision: 4,
+ steps: [replaceStep('s1', '_bk_1', 0, 10)],
+ },
+ {
+ plan_id: 'governing-law',
+ base_revision: 4,
+ steps: [replaceStep('s2', '_bk_1', 12, 20), insertStep('s3', '_bk_9', 'AFTER')],
+ },
+ ],
+ });
});
- expect(result.success).toBe(true);
- if (!result.success) return;
- expect(result.has_conflicts).toBe(false);
- expect(result.conflict_count).toBe(0);
- const merged = result.merged_plan as { steps: Array<{ step_id: string }> };
- expect(merged.steps.map((s) => s.step_id)).toEqual(['s1', 's2', 's3']);
+ await allureStep('Then all steps are merged without conflicts', async () => {
+ expect(result.success).toBe(true);
+ if (!result.success) return;
+ expect(result.has_conflicts).toBe(false);
+ expect(result.conflict_count).toBe(0);
+ const merged = result.merged_plan as { steps: Array<{ step_id: string }> };
+ expect(merged.steps.map((s) => s.step_id)).toEqual(['s1', 's2', 's3']);
+ });
},
);
humanReadableTest.openspec('merge_plans reports base-revision conflict')(
'Scenario: merge_plans reports base-revision conflict',
async () => {
- const result = await mergePlans({
- plans: [
- { plan_id: 'a', base_revision: 1, steps: [replaceStep('s1', '_bk_1', 0, 5)] },
- { plan_id: 'b', base_revision: 2, steps: [replaceStep('s2', '_bk_2', 0, 5)] },
- ],
+ const result = await allureStep('Given plans with differing base revisions are merged', async () => {
+ return mergePlans({
+ plans: [
+ { plan_id: 'a', base_revision: 1, steps: [replaceStep('s1', '_bk_1', 0, 5)] },
+ { plan_id: 'b', base_revision: 2, steps: [replaceStep('s2', '_bk_2', 0, 5)] },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- const conflicts = result.conflicts as Array<{ code: string }>;
- expect(conflicts.some((c) => c.code === 'BASE_REVISION_CONFLICT')).toBe(true);
+ await allureStep('Then a BASE_REVISION_CONFLICT is reported', async () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ const conflicts = result.conflicts as Array<{ code: string }>;
+ expect(conflicts.some((c) => c.code === 'BASE_REVISION_CONFLICT')).toBe(true);
+ });
},
);
humanReadableTest.openspec('merge_plans reports overlapping replace ranges')(
'Scenario: merge_plans reports overlapping replace ranges',
async () => {
- const result = await mergePlans({
- plans: [
- { plan_id: 'a', base_revision: 9, steps: [replaceStep('s1', '_bk_7', 0, 8)] },
- { plan_id: 'b', base_revision: 9, steps: [replaceStep('s2', '_bk_7', 6, 12)] },
- ],
+ const result = await allureStep('Given plans with overlapping replace ranges on the same paragraph', async () => {
+ return mergePlans({
+ plans: [
+ { plan_id: 'a', base_revision: 9, steps: [replaceStep('s1', '_bk_7', 0, 8)] },
+ { plan_id: 'b', base_revision: 9, steps: [replaceStep('s2', '_bk_7', 6, 12)] },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- const conflicts = result.conflicts as Array<{ code: string }>;
- expect(conflicts.some((c) => c.code === 'OVERLAPPING_REPLACE_RANGE')).toBe(true);
+ await allureStep('Then an OVERLAPPING_REPLACE_RANGE conflict is reported', async () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ const conflicts = result.conflicts as Array<{ code: string }>;
+ expect(conflicts.some((c) => c.code === 'OVERLAPPING_REPLACE_RANGE')).toBe(true);
+ });
},
);
humanReadableTest.openspec('merge_plans reports unknown-range conflict for same paragraph')(
'Scenario: merge_plans reports unknown-range conflict for same paragraph',
async () => {
- const result = await mergePlans({
- plans: [
- {
- plan_id: 'a',
- base_revision: 5,
- steps: [
- {
- step_id: 's1',
- operation: 'replace_text',
- target_paragraph_id: '_bk_8',
- old_string: 'A',
- new_string: 'B',
- instruction: 'replace',
- },
- ],
- },
- {
- plan_id: 'b',
- base_revision: 5,
- steps: [replaceStep('s2', '_bk_8', 4, 9)],
- },
- ],
+ const result = await allureStep('Given plans where one step lacks a range on the same paragraph', async () => {
+ return mergePlans({
+ plans: [
+ {
+ plan_id: 'a',
+ base_revision: 5,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'replace_text',
+ target_paragraph_id: '_bk_8',
+ old_string: 'A',
+ new_string: 'B',
+ instruction: 'replace',
+ },
+ ],
+ },
+ {
+ plan_id: 'b',
+ base_revision: 5,
+ steps: [replaceStep('s2', '_bk_8', 4, 9)],
+ },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- const conflicts = result.conflicts as Array<{ code: string }>;
- expect(conflicts.some((c) => c.code === 'UNKNOWN_REPLACE_RANGE')).toBe(true);
+ await allureStep('Then an UNKNOWN_REPLACE_RANGE conflict is reported', async () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ const conflicts = result.conflicts as Array<{ code: string }>;
+ expect(conflicts.some((c) => c.code === 'UNKNOWN_REPLACE_RANGE')).toBe(true);
+ });
},
);
humanReadableTest.openspec('merge_plans reports insert-slot collision')(
'Scenario: merge_plans reports insert-slot collision',
async () => {
- const result = await mergePlans({
- plans: [
- { plan_id: 'a', base_revision: 11, steps: [insertStep('s1', '_bk_10', 'AFTER')] },
- { plan_id: 'b', base_revision: 11, steps: [insertStep('s2', '_bk_10', 'AFTER')] },
- ],
+ const result = await allureStep('Given plans inserting at the same anchor and position', async () => {
+ return mergePlans({
+ plans: [
+ { plan_id: 'a', base_revision: 11, steps: [insertStep('s1', '_bk_10', 'AFTER')] },
+ { plan_id: 'b', base_revision: 11, steps: [insertStep('s2', '_bk_10', 'AFTER')] },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- const conflicts = result.conflicts as Array<{ code: string }>;
- expect(conflicts.some((c) => c.code === 'INSERT_SLOT_COLLISION')).toBe(true);
+ await allureStep('Then an INSERT_SLOT_COLLISION conflict is reported', async () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ const conflicts = result.conflicts as Array<{ code: string }>;
+ expect(conflicts.some((c) => c.code === 'INSERT_SLOT_COLLISION')).toBe(true);
+ });
},
);
humanReadableTest.openspec('merge_plans reports duplicate step IDs')(
'Scenario: merge_plans reports duplicate step IDs',
async () => {
- const result = await mergePlans({
- plans: [
- { plan_id: 'a', base_revision: 2, steps: [replaceStep('dup-id', '_bk_1', 0, 2)] },
- { plan_id: 'b', base_revision: 2, steps: [replaceStep('dup-id', '_bk_2', 0, 2)] },
- ],
+ const result = await allureStep('Given plans with duplicate step IDs across agents', async () => {
+ return mergePlans({
+ plans: [
+ { plan_id: 'a', base_revision: 2, steps: [replaceStep('dup-id', '_bk_1', 0, 2)] },
+ { plan_id: 'b', base_revision: 2, steps: [replaceStep('dup-id', '_bk_2', 0, 2)] },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- const conflicts = result.conflicts as Array<{ code: string }>;
- expect(conflicts.some((c) => c.code === 'DUPLICATE_STEP_ID')).toBe(true);
+ await allureStep('Then a DUPLICATE_STEP_ID conflict is reported', async () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ const conflicts = result.conflicts as Array<{ code: string }>;
+ expect(conflicts.some((c) => c.code === 'DUPLICATE_STEP_ID')).toBe(true);
+ });
},
);
humanReadableTest.openspec('merge_plans fails by default when conflicts exist')(
'Scenario: merge_plans fails by default when conflicts exist',
async () => {
- const result = await mergePlans({
- plans: [
- { plan_id: 'a', base_revision: 1, steps: [replaceStep('s1', '_bk_1', 0, 8)] },
- { plan_id: 'b', base_revision: 1, steps: [replaceStep('s2', '_bk_1', 4, 10)] },
- ],
+ const result = await allureStep('Given conflicting plans merged with default settings', async () => {
+ return mergePlans({
+ plans: [
+ { plan_id: 'a', base_revision: 1, steps: [replaceStep('s1', '_bk_1', 0, 8)] },
+ { plan_id: 'b', base_revision: 1, steps: [replaceStep('s2', '_bk_1', 4, 10)] },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- expect(result.error.code).toBe('PLAN_CONFLICT');
- expect(result.has_conflicts).toBe(true);
+ await allureStep('Then the merge fails with PLAN_CONFLICT error', async () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ expect(result.error.code).toBe('PLAN_CONFLICT');
+ expect(result.has_conflicts).toBe(true);
+ });
},
);
humanReadableTest.openspec('merge_plans can return diagnostics without hard failure')(
'Scenario: merge_plans can return diagnostics without hard failure',
async () => {
- const result = await mergePlans({
- fail_on_conflict: false,
- plans: [
- { plan_id: 'a', base_revision: 1, steps: [replaceStep('s1', '_bk_1', 0, 8)] },
- { plan_id: 'b', base_revision: 1, steps: [replaceStep('s2', '_bk_1', 4, 10)] },
- ],
+ const result = await allureStep('Given conflicting plans merged with fail_on_conflict=false', async () => {
+ return mergePlans({
+ fail_on_conflict: false,
+ plans: [
+ { plan_id: 'a', base_revision: 1, steps: [replaceStep('s1', '_bk_1', 0, 8)] },
+ { plan_id: 'b', base_revision: 1, steps: [replaceStep('s2', '_bk_1', 4, 10)] },
+ ],
+ });
});
- expect(result.success).toBe(true);
- if (!result.success) return;
- expect(result.has_conflicts).toBe(true);
- expect(result.conflict_count).toBeGreaterThan(0);
- expect((result.merged_plan as { step_count: number }).step_count).toBe(2);
+ await allureStep('Then the merge succeeds with conflict diagnostics attached', async () => {
+ expect(result.success).toBe(true);
+ if (!result.success) return;
+ expect(result.has_conflicts).toBe(true);
+ expect(result.conflict_count).toBeGreaterThan(0);
+ expect((result.merged_plan as { step_count: number }).step_count).toBe(2);
+ });
},
);
});
diff --git a/packages/docx-mcp/src/tools/add_run_level_formatting_visibility.test.ts b/packages/docx-mcp/src/tools/add_run_level_formatting_visibility.test.ts
index 841793d..05d4f39 100644
--- a/packages/docx-mcp/src/tools/add_run_level_formatting_visibility.test.ts
+++ b/packages/docx-mcp/src/tools/add_run_level_formatting_visibility.test.ts
@@ -4,7 +4,7 @@ import path from 'node:path';
import { readFile } from './read_file.js';
import { replaceText } from './replace_text.js';
import { save } from './save.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import {
assertSuccess,
parseOutputXml,
@@ -83,95 +83,114 @@ describe('Traceability: Run-Level Formatting Visibility', () => {
humanReadableTest.openspec('TOON output shows inline formatting tags at run boundaries by default')(
'Scenario: TOON output shows inline formatting tags at run boundaries by default',
async () => {
- const { mgr, sessionId } = await openSession([], {
- xml: buildFormattingFixtureXml(),
- extraFiles: {
- '[Content_Types].xml': CONTENT_TYPES_XML,
- '_rels/.rels': RELS_XML,
- 'word/_rels/document.xml.rels': DOCUMENT_RELS_XML,
- },
+ const { mgr, sessionId } = await allureStep('Given a doc with bold, italic, underline, highlight, and hyperlink runs', async () => {
+ return await openSession([], {
+ xml: buildFormattingFixtureXml(),
+ extraFiles: {
+ '[Content_Types].xml': CONTENT_TYPES_XML,
+ '_rels/.rels': RELS_XML,
+ 'word/_rels/document.xml.rels': DOCUMENT_RELS_XML,
+ },
+ });
});
- const read = await readFile(mgr, { session_id: sessionId });
- assertSuccess(read, 'read_file');
- const content = String(read.content);
+ const content = await allureStep('When read_file is called with default formatting', async () => {
+ const read = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(read, 'read_file');
+ return String(read.content);
+ });
- expect(content).toContain('Bold');
- expect(content).toContain('Italic');
- expect(content).toContain('Underline');
- expect(content).toContain('Marked');
- expect(content).toContain('Portal');
+ await allureStep('Then TOON output contains inline formatting tags', async () => {
+ expect(content).toContain('Bold');
+ expect(content).toContain('Italic');
+ expect(content).toContain('Underline');
+ expect(content).toContain('Marked');
+ expect(content).toContain('Portal');
+ });
},
);
humanReadableTest.openspec('show_formatting=false suppresses inline tags')(
'Scenario: show_formatting=false suppresses inline tags',
async () => {
- const { mgr, sessionId } = await openSession([], {
- xml: buildFormattingFixtureXml(),
- extraFiles: {
- '[Content_Types].xml': CONTENT_TYPES_XML,
- '_rels/.rels': RELS_XML,
- 'word/_rels/document.xml.rels': DOCUMENT_RELS_XML,
- },
+ const { mgr, sessionId } = await allureStep('Given a doc with formatted runs', async () => {
+ return await openSession([], {
+ xml: buildFormattingFixtureXml(),
+ extraFiles: {
+ '[Content_Types].xml': CONTENT_TYPES_XML,
+ '_rels/.rels': RELS_XML,
+ 'word/_rels/document.xml.rels': DOCUMENT_RELS_XML,
+ },
+ });
});
- const read = await readFile(mgr, { session_id: sessionId, show_formatting: false });
- assertSuccess(read, 'read_file show_formatting=false');
- const content = String(read.content);
+ const content = await allureStep('When read_file is called with show_formatting=false', async () => {
+ const read = await readFile(mgr, { session_id: sessionId, show_formatting: false });
+ assertSuccess(read, 'read_file show_formatting=false');
+ return String(read.content);
+ });
- expect(content).not.toContain('');
- expect(content).not.toContain('');
- expect(content).not.toContain('');
- expect(content).not.toContain('');
- expect(content).not.toContain(' {
+ expect(content).not.toContain('');
+ expect(content).not.toContain('');
+ expect(content).not.toContain('');
+ expect(content).not.toContain('');
+ expect(content).not.toContain(' {
- const { mgr, sessionId, firstParaId, tmpDir } = await openSession([], {
- xml: buildEditFixtureXml(),
- extraFiles: {
- '[Content_Types].xml': CONTENT_TYPES_XML,
- '_rels/.rels': RELS_XML,
- },
+ const { mgr, sessionId, firstParaId, tmpDir } = await allureStep('Given a doc with a placeholder paragraph', async () => {
+ return await openSession([], {
+ xml: buildEditFixtureXml(),
+ extraFiles: {
+ '[Content_Types].xml': CONTENT_TYPES_XML,
+ '_rels/.rels': RELS_XML,
+ },
+ });
});
- const edited = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: firstParaId,
- old_string: '[X]',
- new_string: 'Bold Italic Underline Marked',
- instruction: 'Validate writable formatting tags in new_string',
+ const outputPath = await allureStep('When replace_text injects formatting tags and the doc is saved', async () => {
+ const edited = await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: firstParaId,
+ old_string: '[X]',
+ new_string: 'Bold Italic Underline Marked',
+ instruction: 'Validate writable formatting tags in new_string',
+ });
+ assertSuccess(edited, 'replace_text');
+
+ const outputPath = path.join(tmpDir, 'formatted-output.docx');
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: outputPath,
+ clean_bookmarks: true,
+ save_format: 'clean',
+ });
+ assertSuccess(saved, 'save');
+ return outputPath;
});
- assertSuccess(edited, 'replace_text');
-
- const outputPath = path.join(tmpDir, 'formatted-output.docx');
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: outputPath,
- clean_bookmarks: true,
- save_format: 'clean',
+
+ await allureStep('Then saved OOXML contains correct run-level formatting properties', async () => {
+ const { runs, runText, hasBold, hasItalic, hasUnderline, hasHighlight } = await parseOutputXml(outputPath);
+ const boldRun = runs.find((r) => runText(r).includes('Bold'));
+ const italicRun = runs.find((r) => runText(r).includes('Italic'));
+ const underlineRun = runs.find((r) => runText(r).includes('Underline'));
+ const highlightRun = runs.find((r) => runText(r).includes('Marked'));
+
+ expect(boldRun).toBeTruthy();
+ expect(italicRun).toBeTruthy();
+ expect(underlineRun).toBeTruthy();
+ expect(highlightRun).toBeTruthy();
+ expect(hasBold(boldRun!)).toBe(true);
+ expect(hasItalic(italicRun!)).toBe(true);
+ expect(hasUnderline(underlineRun!)).toBe(true);
+ expect(hasHighlight(highlightRun!)).toBe(true);
});
- assertSuccess(saved, 'save');
-
- const { runs, runText, hasBold, hasItalic, hasUnderline, hasHighlight } = await parseOutputXml(outputPath);
- const boldRun = runs.find((r) => runText(r).includes('Bold'));
- const italicRun = runs.find((r) => runText(r).includes('Italic'));
- const underlineRun = runs.find((r) => runText(r).includes('Underline'));
- const highlightRun = runs.find((r) => runText(r).includes('Marked'));
-
- expect(boldRun).toBeTruthy();
- expect(italicRun).toBeTruthy();
- expect(underlineRun).toBeTruthy();
- expect(highlightRun).toBeTruthy();
- expect(hasBold(boldRun!)).toBe(true);
- expect(hasItalic(italicRun!)).toBe(true);
- expect(hasUnderline(underlineRun!)).toBe(true);
- expect(hasHighlight(highlightRun!)).toBe(true);
},
);
});
diff --git a/packages/docx-mcp/src/tools/add_safe_docx_batch_apply_and_strict_anchor_resolution.test.ts b/packages/docx-mcp/src/tools/add_safe_docx_batch_apply_and_strict_anchor_resolution.test.ts
index 317c953..ea9f11b 100644
--- a/packages/docx-mcp/src/tools/add_safe_docx_batch_apply_and_strict_anchor_resolution.test.ts
+++ b/packages/docx-mcp/src/tools/add_safe_docx_batch_apply_and_strict_anchor_resolution.test.ts
@@ -5,7 +5,7 @@ import { describe, expect } from 'vitest';
import { MCP_TOOLS, dispatchToolCall } from '../server.js';
import { mergePlans } from './merge_plans.js';
import { readFile } from './read_file.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import {
createTestSessionManager,
createTrackedTempDir,
@@ -33,99 +33,132 @@ describe('Traceability: Batch Apply and Strict Anchor Resolution', () => {
humanReadableTest.openspec('canonical names are advertised')(
'Scenario: canonical names are advertised',
async () => {
- const toolNames = new Set(MCP_TOOLS.map((tool) => tool.name));
- expect(toolNames.has('replace_text')).toBe(true);
- expect(toolNames.has('insert_paragraph')).toBe(true);
+ const toolNames = await allureStep('Given the MCP tool catalog', async () => {
+ return new Set(MCP_TOOLS.map((tool) => tool.name));
+ });
+
+ await allureStep('Then replace_text and insert_paragraph are advertised', () => {
+ expect(toolNames.has('replace_text')).toBe(true);
+ expect(toolNames.has('insert_paragraph')).toBe(true);
+ });
},
);
humanReadableTest.openspec('legacy aliases are unavailable')(
'Scenario: legacy aliases are unavailable',
async () => {
- const toolNames = new Set(MCP_TOOLS.map((tool) => tool.name));
- expect(toolNames.has('smart_edit')).toBe(false);
- expect(toolNames.has('smart_insert')).toBe(false);
+ const toolNames = await allureStep('Given the MCP tool catalog', async () => {
+ return new Set(MCP_TOOLS.map((tool) => tool.name));
+ });
+
+ await allureStep('Then smart_edit and smart_insert are absent', () => {
+ expect(toolNames.has('smart_edit')).toBe(false);
+ expect(toolNames.has('smart_insert')).toBe(false);
+ });
},
);
humanReadableTest.openspec('legacy aliases are rejected inside plan operations')(
'Scenario: legacy aliases are rejected inside plan operations',
async () => {
- const result = await mergePlans({
- plans: [
- {
- plan_id: 'legacy-edit',
- base_revision: 1,
- steps: [
- {
- step_id: 's1',
- operation: 'smart_edit',
- target_paragraph_id: '_bk_1',
- old_string: 'old',
- new_string: 'new',
- instruction: 'legacy operation',
- },
- ],
- },
- {
- plan_id: 'legacy-insert',
- base_revision: 1,
- steps: [
- {
- step_id: 's2',
- operation: 'smart_insert',
- positional_anchor_node_id: '_bk_1',
- new_string: 'new paragraph',
- instruction: 'legacy operation',
- },
- ],
- },
- ],
+ const result = await allureStep('When mergePlans is called with smart_edit and smart_insert steps', async () => {
+ return mergePlans({
+ plans: [
+ {
+ plan_id: 'legacy-edit',
+ base_revision: 1,
+ steps: [
+ {
+ step_id: 's1',
+ operation: 'smart_edit',
+ target_paragraph_id: '_bk_1',
+ old_string: 'old',
+ new_string: 'new',
+ instruction: 'legacy operation',
+ },
+ ],
+ },
+ {
+ plan_id: 'legacy-insert',
+ base_revision: 1,
+ steps: [
+ {
+ step_id: 's2',
+ operation: 'smart_insert',
+ positional_anchor_node_id: '_bk_1',
+ new_string: 'new paragraph',
+ instruction: 'legacy operation',
+ },
+ ],
+ },
+ ],
+ });
});
- expect(result.success).toBe(false);
- if (result.success) return;
- const conflicts = result.conflicts as Array<{ code: string }>;
- expect(conflicts.some((conflict) => conflict.code === 'INVALID_STEP_OPERATION')).toBe(true);
+ await allureStep('Then merge fails with INVALID_STEP_OPERATION conflict', () => {
+ expect(result.success).toBe(false);
+ if (result.success) return;
+ const conflicts = result.conflicts as Array<{ code: string }>;
+ expect(conflicts.some((conflict) => conflict.code === 'INVALID_STEP_OPERATION')).toBe(true);
+ });
},
);
humanReadableTest.openspec('MCP catalog omits open_document')(
'Scenario: MCP catalog omits open_document',
async () => {
- const toolNames = new Set(MCP_TOOLS.map((tool) => tool.name));
- expect(toolNames.has('open_document')).toBe(false);
- expect(toolNames.has('read_file')).toBe(true);
- expect(toolNames.has('grep')).toBe(true);
+ const toolNames = await allureStep('Given the MCP tool catalog', async () => {
+ return new Set(MCP_TOOLS.map((tool) => tool.name));
+ });
+
+ await allureStep('Then open_document is absent but read_file and grep are present', () => {
+ expect(toolNames.has('open_document')).toBe(false);
+ expect(toolNames.has('read_file')).toBe(true);
+ expect(toolNames.has('grep')).toBe(true);
+ });
},
);
humanReadableTest.openspec('open_document call is rejected as unsupported')(
'Scenario: open_document call is rejected as unsupported',
async () => {
- const manager = createTestSessionManager();
- const result = await dispatchToolCall(manager, 'open_document', {});
+ const manager = await allureStep('Given a test session manager', async () => {
+ return createTestSessionManager();
+ });
+
+ const result = await allureStep('When open_document is dispatched', async () => {
+ return dispatchToolCall(manager, 'open_document', {});
+ });
- expect(result.success).toBe(false);
- expect((result.error as { code?: string }).code).toBe('UNKNOWN_TOOL');
- expect(String((result.error as { message?: string }).message ?? '')).toContain('open_document');
- expect(String((result.error as { hint?: string }).hint ?? '')).toContain('read_file');
+ await allureStep('Then it fails with UNKNOWN_TOOL and hints at read_file', () => {
+ expect(result.success).toBe(false);
+ expect((result.error as { code?: string }).code).toBe('UNKNOWN_TOOL');
+ expect(String((result.error as { message?: string }).message ?? '')).toContain('open_document');
+ expect(String((result.error as { hint?: string }).hint ?? '')).toContain('read_file');
+ });
},
);
humanReadableTest.openspec('document tools accept file-first entry without pre-open')(
'Scenario: document tools accept file-first entry without pre-open',
async () => {
- const manager = createTestSessionManager();
- const filePath = await writeDocx(['Alpha clause']);
+ const { manager, filePath } = await allureStep('Given a session manager and a docx file', async () => {
+ const mgr = createTestSessionManager();
+ const fp = await writeDocx(['Alpha clause']);
+ return { manager: mgr, filePath: fp };
+ });
- const read = await readFile(manager, { file_path: filePath, format: 'simple' });
+ const read = await allureStep('When readFile is called with file_path (no prior open)', async () => {
+ return readFile(manager, { file_path: filePath, format: 'simple' });
+ });
- expect(read.success).toBe(true);
- if (!read.success) return;
- expect(read.session_resolution).toBe('opened_new_session');
- expect(typeof read.resolved_session_id).toBe('string');
- expect(read.resolved_file_path).toBe(manager.normalizePath(filePath));
+ await allureStep('Then a new session is opened and the file is resolved', () => {
+ expect(read.success).toBe(true);
+ if (!read.success) return;
+ expect(read.session_resolution).toBe('opened_new_session');
+ expect(typeof read.resolved_session_id).toBe('string');
+ expect(read.resolved_file_path).toBe(manager.normalizePath(filePath));
+ });
},
);
});
diff --git a/packages/docx-mcp/src/tools/add_safe_docx_layout_format_controls.test.ts b/packages/docx-mcp/src/tools/add_safe_docx_layout_format_controls.test.ts
index b9e0af3..f75bb09 100644
--- a/packages/docx-mcp/src/tools/add_safe_docx_layout_format_controls.test.ts
+++ b/packages/docx-mcp/src/tools/add_safe_docx_layout_format_controls.test.ts
@@ -7,7 +7,7 @@ import { readFile } from './read_file.js';
import { save } from './save.js';
import { formatLayout } from './format_layout.js';
import { firstParaIdFromToon, extractParaIdsFromToon } from '../testing/docx_test_utils.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import {
assertFailure,
assertSuccess,
@@ -32,208 +32,262 @@ describe('Traceability: Layout Format Controls', () => {
registerCleanup();
humanReadableTest.openspec('format paragraph spacing by paragraph ID')('Scenario: format paragraph spacing by paragraph ID', async () => {
- const opened = await openSession(['Alpha clause', 'Beta clause']);
- const paraId = firstParaIdFromToon(opened.content);
-
- const formatted = await formatLayout(opened.mgr, {
- session_id: opened.sessionId,
- paragraph_spacing: {
- paragraph_ids: [paraId],
- before_twips: 120,
- after_twips: 240,
- line_twips: 360,
- line_rule: 'auto',
- },
- });
- assertSuccess(formatted, 'format_layout');
- expect(formatted.mutation_summary).toEqual({
- affected_paragraphs: 1,
- affected_rows: 0,
- affected_cells: 0,
- });
-
- const outPath = path.join(opened.tmpDir, 'layout-spacing.docx');
- const saved = await save(opened.mgr, {
- session_id: opened.sessionId,
- save_to_local_path: outPath,
- save_format: 'clean',
- clean_bookmarks: true,
- });
- assertSuccess(saved, 'save');
-
- const { dom } = await parseOutputXml(outPath);
- const spacing = dom.getElementsByTagNameNS(W_NS, 'spacing').item(0) as Element | null;
- expect(spacing).toBeTruthy();
- expect(getWAttr(spacing!, 'before')).toBe('120');
- expect(getWAttr(spacing!, 'after')).toBe('240');
- expect(getWAttr(spacing!, 'line')).toBe('360');
- expect(getWAttr(spacing!, 'lineRule')).toBe('auto');
+ const { opened, paraId } = await allureStep('Given a session with two paragraphs', async () => {
+ const o = await openSession(['Alpha clause', 'Beta clause']);
+ const pid = firstParaIdFromToon(o.content);
+ return { opened: o, paraId: pid };
+ });
+
+ const formatted = await allureStep('When paragraph spacing is applied', async () => {
+ const result = await formatLayout(opened.mgr, {
+ session_id: opened.sessionId,
+ paragraph_spacing: {
+ paragraph_ids: [paraId],
+ before_twips: 120,
+ after_twips: 240,
+ line_twips: 360,
+ line_rule: 'auto',
+ },
+ });
+ assertSuccess(result, 'format_layout');
+ return result;
+ });
+
+ await allureStep('Then mutation summary reports 1 affected paragraph', () => {
+ expect(formatted.mutation_summary).toEqual({
+ affected_paragraphs: 1,
+ affected_rows: 0,
+ affected_cells: 0,
+ });
+ });
+
+ await allureStep('Then saved output contains correct spacing attributes', async () => {
+ const outPath = path.join(opened.tmpDir, 'layout-spacing.docx');
+ const saved = await save(opened.mgr, {
+ session_id: opened.sessionId,
+ save_to_local_path: outPath,
+ save_format: 'clean',
+ clean_bookmarks: true,
+ });
+ assertSuccess(saved, 'save');
+
+ const { dom } = await parseOutputXml(outPath);
+ const spacing = dom.getElementsByTagNameNS(W_NS, 'spacing').item(0) as Element | null;
+ expect(spacing).toBeTruthy();
+ expect(getWAttr(spacing!, 'before')).toBe('120');
+ expect(getWAttr(spacing!, 'after')).toBe('240');
+ expect(getWAttr(spacing!, 'line')).toBe('360');
+ expect(getWAttr(spacing!, 'lineRule')).toBe('auto');
+ });
});
humanReadableTest.openspec('format table row height and cell padding')('Scenario: format table row height and cell padding', async () => {
- const xml =
- `` +
- `` +
- `` +
- `Table heading` +
- `` +
- `` +
- `A1` +
- `B1` +
- `` +
- `` +
- `A2` +
- `B2` +
- `` +
- `` +
- `` +
- ``;
-
- const opened = await openSession([], { xml, prefix: 'safe-docx-layout-table-' });
- const formatted = await formatLayout(opened.mgr, {
- session_id: opened.sessionId,
- row_height: {
- table_indexes: [0],
- row_indexes: [1],
- value_twips: 420,
- rule: 'exact',
- },
- cell_padding: {
- table_indexes: [0],
- row_indexes: [1],
- cell_indexes: [0],
- top_dxa: 80,
- bottom_dxa: 120,
- left_dxa: 60,
- right_dxa: 60,
- },
- });
- assertSuccess(formatted, 'format_layout');
- expect(formatted.mutation_summary).toEqual({
- affected_paragraphs: 0,
- affected_rows: 1,
- affected_cells: 1,
- });
-
- const outPath = path.join(opened.tmpDir, 'layout-table.docx');
- const saved = await save(opened.mgr, {
- session_id: opened.sessionId,
- save_to_local_path: outPath,
- save_format: 'clean',
- clean_bookmarks: true,
- });
- assertSuccess(saved, 'save');
-
- const { dom } = await parseOutputXml(outPath);
- const trHeight = dom.getElementsByTagNameNS(W_NS, 'trHeight').item(0) as Element | null;
- expect(trHeight).toBeTruthy();
- expect(getWAttr(trHeight!, 'val')).toBe('420');
- expect(getWAttr(trHeight!, 'hRule')).toBe('exact');
-
- const tcMar = dom.getElementsByTagNameNS(W_NS, 'tcMar').item(0) as Element | null;
- expect(tcMar).toBeTruthy();
- const top = tcMar!.getElementsByTagNameNS(W_NS, 'top').item(0) as Element | null;
- const bottom = tcMar!.getElementsByTagNameNS(W_NS, 'bottom').item(0) as Element | null;
- const left = tcMar!.getElementsByTagNameNS(W_NS, 'left').item(0) as Element | null;
- const right = tcMar!.getElementsByTagNameNS(W_NS, 'right').item(0) as Element | null;
- expect(getWAttr(top!, 'w')).toBe('80');
- expect(getWAttr(bottom!, 'w')).toBe('120');
- expect(getWAttr(left!, 'w')).toBe('60');
- expect(getWAttr(right!, 'w')).toBe('60');
+ const opened = await allureStep('Given a session with a 2x2 table', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `Table heading` +
+ `` +
+ `` +
+ `A1` +
+ `B1` +
+ `` +
+ `` +
+ `A2` +
+ `B2` +
+ `` +
+ `` +
+ `` +
+ ``;
+ return openSession([], { xml, prefix: 'safe-docx-layout-table-' });
+ });
+
+ const formatted = await allureStep('When row height and cell padding are applied', async () => {
+ const result = await formatLayout(opened.mgr, {
+ session_id: opened.sessionId,
+ row_height: {
+ table_indexes: [0],
+ row_indexes: [1],
+ value_twips: 420,
+ rule: 'exact',
+ },
+ cell_padding: {
+ table_indexes: [0],
+ row_indexes: [1],
+ cell_indexes: [0],
+ top_dxa: 80,
+ bottom_dxa: 120,
+ left_dxa: 60,
+ right_dxa: 60,
+ },
+ });
+ assertSuccess(result, 'format_layout');
+ return result;
+ });
+
+ await allureStep('Then mutation summary reports 1 row and 1 cell affected', () => {
+ expect(formatted.mutation_summary).toEqual({
+ affected_paragraphs: 0,
+ affected_rows: 1,
+ affected_cells: 1,
+ });
+ });
+
+ await allureStep('Then saved output contains correct trHeight and tcMar', async () => {
+ const outPath = path.join(opened.tmpDir, 'layout-table.docx');
+ const saved = await save(opened.mgr, {
+ session_id: opened.sessionId,
+ save_to_local_path: outPath,
+ save_format: 'clean',
+ clean_bookmarks: true,
+ });
+ assertSuccess(saved, 'save');
+
+ const { dom } = await parseOutputXml(outPath);
+ const trHeight = dom.getElementsByTagNameNS(W_NS, 'trHeight').item(0) as Element | null;
+ expect(trHeight).toBeTruthy();
+ expect(getWAttr(trHeight!, 'val')).toBe('420');
+ expect(getWAttr(trHeight!, 'hRule')).toBe('exact');
+
+ const tcMar = dom.getElementsByTagNameNS(W_NS, 'tcMar').item(0) as Element | null;
+ expect(tcMar).toBeTruthy();
+ const top = tcMar!.getElementsByTagNameNS(W_NS, 'top').item(0) as Element | null;
+ const bottom = tcMar!.getElementsByTagNameNS(W_NS, 'bottom').item(0) as Element | null;
+ const left = tcMar!.getElementsByTagNameNS(W_NS, 'left').item(0) as Element | null;
+ const right = tcMar!.getElementsByTagNameNS(W_NS, 'right').item(0) as Element | null;
+ expect(getWAttr(top!, 'w')).toBe('80');
+ expect(getWAttr(bottom!, 'w')).toBe('120');
+ expect(getWAttr(left!, 'w')).toBe('60');
+ expect(getWAttr(right!, 'w')).toBe('60');
+ });
});
humanReadableTest.openspec('invalid layout values are rejected with structured error')('Scenario: invalid layout values are rejected with structured error', async () => {
- const opened = await openSession(['Alpha clause']);
- const paraId = firstParaIdFromToon(opened.content);
-
- const invalid = await formatLayout(opened.mgr, {
- session_id: opened.sessionId,
- paragraph_spacing: {
- paragraph_ids: [paraId],
- after_twips: -1,
- },
- });
- assertFailure(invalid, 'VALIDATION_ERROR', 'format_layout invalid value');
- expect(invalid.error.hint).toContain('non-negative');
+ const { opened, paraId } = await allureStep('Given a session with one paragraph', async () => {
+ const o = await openSession(['Alpha clause']);
+ const pid = firstParaIdFromToon(o.content);
+ return { opened: o, paraId: pid };
+ });
+
+ const invalid = await allureStep('When a negative after_twips value is submitted', async () => {
+ return formatLayout(opened.mgr, {
+ session_id: opened.sessionId,
+ paragraph_spacing: {
+ paragraph_ids: [paraId],
+ after_twips: -1,
+ },
+ });
+ });
+
+ await allureStep('Then it fails with VALIDATION_ERROR hinting non-negative', () => {
+ assertFailure(invalid, 'VALIDATION_ERROR', 'format_layout invalid value');
+ expect(invalid.error.hint).toContain('non-negative');
+ });
});
humanReadableTest.openspec('no spacer paragraphs are introduced')('Scenario: no spacer paragraphs are introduced', async () => {
- const opened = await openSession(['One', 'Two', 'Three']);
- const beforeIds = extractParaIdsFromToon(opened.content);
-
- const formatted = await formatLayout(opened.mgr, {
- session_id: opened.sessionId,
- paragraph_spacing: {
- paragraph_ids: [beforeIds[1]!],
- after_twips: 200,
- },
- });
- assertSuccess(formatted, 'format_layout');
- expect(formatted.no_spacer_paragraphs).toBe(true);
-
- const after = await readFile(opened.mgr, { session_id: opened.sessionId, format: 'simple' });
- assertSuccess(after, 'read after');
- const afterIds = extractParaIdsFromToon(String(after.content));
- expect(afterIds.length).toBe(beforeIds.length);
+ const { opened, beforeIds } = await allureStep('Given a session with three paragraphs', async () => {
+ const o = await openSession(['One', 'Two', 'Three']);
+ const ids = extractParaIdsFromToon(o.content);
+ return { opened: o, beforeIds: ids };
+ });
+
+ const formatted = await allureStep('When spacing is applied to the second paragraph', async () => {
+ const result = await formatLayout(opened.mgr, {
+ session_id: opened.sessionId,
+ paragraph_spacing: {
+ paragraph_ids: [beforeIds[1]!],
+ after_twips: 200,
+ },
+ });
+ assertSuccess(result, 'format_layout');
+ return result;
+ });
+
+ await allureStep('Then no spacer paragraphs flag is true and count is unchanged', async () => {
+ expect(formatted.no_spacer_paragraphs).toBe(true);
+
+ const after = await readFile(opened.mgr, { session_id: opened.sessionId, format: 'simple' });
+ assertSuccess(after, 'read after');
+ const afterIds = extractParaIdsFromToon(String(after.content));
+ expect(afterIds.length).toBe(beforeIds.length);
+ });
});
humanReadableTest.openspec('paragraph IDs remain stable after layout formatting')('Scenario: paragraph IDs remain stable after layout formatting', async () => {
- const opened = await openSession(['First', 'Second', 'Third']);
- const beforeIds = extractParaIdsFromToon(opened.content);
-
- const formatted = await formatLayout(opened.mgr, {
- session_id: opened.sessionId,
- paragraph_spacing: {
- paragraph_ids: [beforeIds[0]!, beforeIds[2]!],
- before_twips: 120,
- after_twips: 180,
- },
- });
- assertSuccess(formatted, 'format_layout');
-
- const after = await readFile(opened.mgr, { session_id: opened.sessionId, format: 'simple' });
- assertSuccess(after, 'read after');
- const afterIds = extractParaIdsFromToon(String(after.content));
- expect(afterIds).toEqual(beforeIds);
+ const { opened, beforeIds } = await allureStep('Given a session with three paragraphs', async () => {
+ const o = await openSession(['First', 'Second', 'Third']);
+ const ids = extractParaIdsFromToon(o.content);
+ return { opened: o, beforeIds: ids };
+ });
+
+ await allureStep('When spacing is applied to the first and third paragraphs', async () => {
+ const result = await formatLayout(opened.mgr, {
+ session_id: opened.sessionId,
+ paragraph_spacing: {
+ paragraph_ids: [beforeIds[0]!, beforeIds[2]!],
+ before_twips: 120,
+ after_twips: 180,
+ },
+ });
+ assertSuccess(result, 'format_layout');
+ });
+
+ await allureStep('Then paragraph IDs are identical before and after', async () => {
+ const after = await readFile(opened.mgr, { session_id: opened.sessionId, format: 'simple' });
+ assertSuccess(after, 'read after');
+ const afterIds = extractParaIdsFromToon(String(after.content));
+ expect(afterIds).toEqual(beforeIds);
+ });
});
humanReadableTest.openspec('npx runtime remains Python-free')('Scenario: npx runtime remains Python-free', async () => {
- const packageJsonPath = fileURLToPath(new URL('../../package.json', import.meta.url));
- const pkg = JSON.parse(await fs.readFile(packageJsonPath, 'utf-8')) as {
- dependencies?: Record;
- optionalDependencies?: Record;
- };
-
- const deps = {
- ...(pkg.dependencies ?? {}),
- ...(pkg.optionalDependencies ?? {}),
- };
- const depNames = Object.keys(deps).join(' ').toLowerCase();
- expect(depNames.includes('python')).toBe(false);
- expect(depNames.includes('aspose')).toBe(false);
+ const depNames = await allureStep('Given the combined dependencies from package.json', async () => {
+ const packageJsonPath = fileURLToPath(new URL('../../package.json', import.meta.url));
+ const pkg = JSON.parse(await fs.readFile(packageJsonPath, 'utf-8')) as {
+ dependencies?: Record;
+ optionalDependencies?: Record;
+ };
+ const deps = {
+ ...(pkg.dependencies ?? {}),
+ ...(pkg.optionalDependencies ?? {}),
+ };
+ return Object.keys(deps).join(' ').toLowerCase();
+ });
+
+ await allureStep('Then no python or aspose dependencies exist', () => {
+ expect(depNames.includes('python')).toBe(false);
+ expect(depNames.includes('aspose')).toBe(false);
+ });
});
humanReadableTest.openspec('format_layout does not invoke external process tooling at runtime')(
'Scenario: format_layout does not invoke external process tooling at runtime',
async () => {
- const opened = await openSession(['Runtime boundary']);
- const paraId = firstParaIdFromToon(opened.content);
-
- const originalPath = process.env.PATH;
- process.env.PATH = '';
-
- try {
- const formatted = await formatLayout(opened.mgr, {
- session_id: opened.sessionId,
- paragraph_spacing: {
- paragraph_ids: [paraId],
- after_twips: 120,
- },
- });
- assertSuccess(formatted, 'format_layout');
- } finally {
- process.env.PATH = originalPath;
- }
+ const { opened, paraId } = await allureStep('Given a session and an empty PATH', async () => {
+ const o = await openSession(['Runtime boundary']);
+ const pid = firstParaIdFromToon(o.content);
+ return { opened: o, paraId: pid };
+ });
+
+ await allureStep('When format_layout runs with PATH emptied', async () => {
+ const originalPath = process.env.PATH;
+ process.env.PATH = '';
+
+ try {
+ const formatted = await formatLayout(opened.mgr, {
+ session_id: opened.sessionId,
+ paragraph_spacing: {
+ paragraph_ids: [paraId],
+ after_twips: 120,
+ },
+ });
+ assertSuccess(formatted, 'format_layout');
+ } finally {
+ process.env.PATH = originalPath;
+ }
+ });
},
);
});
diff --git a/packages/docx-mcp/src/tools/delete_comment.test.ts b/packages/docx-mcp/src/tools/delete_comment.test.ts
index 3d6a760..6f83d5d 100644
--- a/packages/docx-mcp/src/tools/delete_comment.test.ts
+++ b/packages/docx-mcp/src/tools/delete_comment.test.ts
@@ -1,5 +1,5 @@
import { describe, expect } from 'vitest';
-import { testAllure, allureJsonAttachment } from '../testing/allure-test.js';
+import { testAllure, allureJsonAttachment, allureStep } from '../testing/allure-test.js';
import {
assertFailure,
assertSuccess,
@@ -34,207 +34,253 @@ describe('OpenSpec traceability: add-comment-delete-tool', () => {
humanReadableTest.openspec('delete comment successfully')(
'Scenario: delete root comment with no replies',
async () => {
- const opened = await openSession(['Paragraph with a comment.']);
-
- const added = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Alice',
- text: 'Root comment to delete.',
+ const { opened, added } = await allureStep('Given a session with one root comment', async () => {
+ const opened = await openSession(['Paragraph with a comment.']);
+ const added = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ author: 'Alice',
+ text: 'Root comment to delete.',
+ });
+ assertSuccess(added, 'add_comment');
+ const beforeDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(beforeDelete, 'get_comments (before)');
+ expect(commentsList(beforeDelete.comments)).toHaveLength(1);
+ return { opened, added };
});
- assertSuccess(added, 'add_comment');
-
- const beforeDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(beforeDelete, 'get_comments (before)');
- expect(commentsList(beforeDelete.comments)).toHaveLength(1);
- const result = await deleteComment(opened.mgr, {
- session_id: opened.sessionId,
- comment_id: added.comment_id as number,
+ const result = await allureStep('When the root comment is deleted', async () => {
+ const result = await deleteComment(opened.mgr, {
+ session_id: opened.sessionId,
+ comment_id: added.comment_id as number,
+ });
+ assertSuccess(result, 'delete_comment');
+ await allureJsonAttachment('delete_comment-response', result);
+ return result;
});
- assertSuccess(result, 'delete_comment');
- await allureJsonAttachment('delete_comment-response', result);
- expect(result.comment_id).toBe(added.comment_id);
- expect(result.session_id).toBe(opened.sessionId);
- const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(afterDelete, 'get_comments (after)');
- expect(commentsList(afterDelete.comments)).toHaveLength(0);
+ await allureStep('Then the comment is removed and IDs match', async () => {
+ expect(result.comment_id).toBe(added.comment_id);
+ expect(result.session_id).toBe(opened.sessionId);
+ const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(afterDelete, 'get_comments (after)');
+ expect(commentsList(afterDelete.comments)).toHaveLength(0);
+ });
},
);
humanReadableTest.openspec('delete comment with replies cascades')(
'Scenario: delete root comment cascades to all replies',
async () => {
- const opened = await openSession(['Paragraph for cascade test.']);
-
- const root = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Alice',
- text: 'Root with replies.',
+ const { opened, root } = await allureStep('Given a root comment with one reply', async () => {
+ const opened = await openSession(['Paragraph for cascade test.']);
+ const root = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ author: 'Alice',
+ text: 'Root with replies.',
+ });
+ assertSuccess(root, 'add_comment (root)');
+ const reply = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ parent_comment_id: root.comment_id as number,
+ author: 'Bob',
+ text: 'Reply to root.',
+ });
+ assertSuccess(reply, 'add_comment (reply)');
+ const beforeDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(beforeDelete, 'get_comments (before)');
+ expect(commentsList(beforeDelete.comments)[0].replies).toHaveLength(1);
+ return { opened, root };
});
- assertSuccess(root, 'add_comment (root)');
- const reply = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- parent_comment_id: root.comment_id as number,
- author: 'Bob',
- text: 'Reply to root.',
+ await allureStep('When the root comment is deleted', async () => {
+ const result = await deleteComment(opened.mgr, {
+ session_id: opened.sessionId,
+ comment_id: root.comment_id as number,
+ });
+ assertSuccess(result, 'delete_comment (cascade)');
});
- assertSuccess(reply, 'add_comment (reply)');
-
- const beforeDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(beforeDelete, 'get_comments (before)');
- const rootBefore = commentsList(beforeDelete.comments)[0];
- expect(rootBefore.replies).toHaveLength(1);
- const result = await deleteComment(opened.mgr, {
- session_id: opened.sessionId,
- comment_id: root.comment_id as number,
+ await allureStep('Then all comments including replies are removed', async () => {
+ const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(afterDelete, 'get_comments (after)');
+ expect(commentsList(afterDelete.comments)).toHaveLength(0);
});
- assertSuccess(result, 'delete_comment (cascade)');
-
- const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(afterDelete, 'get_comments (after)');
- expect(commentsList(afterDelete.comments)).toHaveLength(0);
},
);
humanReadableTest.openspec('delete a single leaf reply')(
'Scenario: delete single leaf reply',
async () => {
- const opened = await openSession(['Paragraph for reply delete.']);
-
- const root = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Alice',
- text: 'Root stays intact.',
+ const { opened, reply } = await allureStep('Given a root comment with one reply', async () => {
+ const opened = await openSession(['Paragraph for reply delete.']);
+ const root = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ author: 'Alice',
+ text: 'Root stays intact.',
+ });
+ assertSuccess(root, 'add_comment (root)');
+ const reply = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ parent_comment_id: root.comment_id as number,
+ author: 'Bob',
+ text: 'Reply to delete.',
+ });
+ assertSuccess(reply, 'add_comment (reply)');
+ return { opened, reply };
});
- assertSuccess(root, 'add_comment (root)');
- const reply = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- parent_comment_id: root.comment_id as number,
- author: 'Bob',
- text: 'Reply to delete.',
+ await allureStep('When the leaf reply is deleted', async () => {
+ const result = await deleteComment(opened.mgr, {
+ session_id: opened.sessionId,
+ comment_id: reply.comment_id as number,
+ });
+ assertSuccess(result, 'delete_comment (reply)');
});
- assertSuccess(reply, 'add_comment (reply)');
- const result = await deleteComment(opened.mgr, {
- session_id: opened.sessionId,
- comment_id: reply.comment_id as number,
+ await allureStep('Then only the root comment remains with no replies', async () => {
+ const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(afterDelete, 'get_comments (after)');
+ const comments = commentsList(afterDelete.comments);
+ expect(comments).toHaveLength(1);
+ expect(comments[0].author).toBe('Alice');
+ expect(comments[0].replies).toHaveLength(0);
});
- assertSuccess(result, 'delete_comment (reply)');
-
- const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(afterDelete, 'get_comments (after)');
- const comments = commentsList(afterDelete.comments);
- expect(comments).toHaveLength(1);
- expect(comments[0].author).toBe('Alice');
- expect(comments[0].replies).toHaveLength(0);
},
);
humanReadableTest.openspec('delete a non-leaf reply cascades to descendants')(
'Scenario: delete non-leaf reply cascades to descendants',
async () => {
- const opened = await openSession(['Paragraph for deep cascade.']);
-
- const root = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Alice',
- text: 'Root comment.',
+ const { opened, reply1 } = await allureStep('Given a three-level comment thread', async () => {
+ const opened = await openSession(['Paragraph for deep cascade.']);
+ const root = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ author: 'Alice',
+ text: 'Root comment.',
+ });
+ assertSuccess(root, 'add_comment (root)');
+ const reply1 = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ parent_comment_id: root.comment_id as number,
+ author: 'Bob',
+ text: 'Middle reply.',
+ });
+ assertSuccess(reply1, 'add_comment (reply1)');
+ const reply2 = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ parent_comment_id: reply1.comment_id as number,
+ author: 'Carol',
+ text: 'Nested reply.',
+ });
+ assertSuccess(reply2, 'add_comment (reply2)');
+ return { opened, reply1 };
});
- assertSuccess(root, 'add_comment (root)');
- const reply1 = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- parent_comment_id: root.comment_id as number,
- author: 'Bob',
- text: 'Middle reply.',
+ await allureStep('When the middle reply is deleted', async () => {
+ const result = await deleteComment(opened.mgr, {
+ session_id: opened.sessionId,
+ comment_id: reply1.comment_id as number,
+ });
+ assertSuccess(result, 'delete_comment (non-leaf)');
});
- assertSuccess(reply1, 'add_comment (reply1)');
- const reply2 = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- parent_comment_id: reply1.comment_id as number,
- author: 'Carol',
- text: 'Nested reply.',
+ await allureStep('Then only root remains with no replies', async () => {
+ const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(afterDelete, 'get_comments (after)');
+ const comments = commentsList(afterDelete.comments);
+ expect(comments).toHaveLength(1);
+ expect(comments[0].author).toBe('Alice');
+ expect(comments[0].replies).toHaveLength(0);
});
- assertSuccess(reply2, 'add_comment (reply2)');
-
- // Delete the middle reply — should cascade to reply2 but leave root intact
- const result = await deleteComment(opened.mgr, {
- session_id: opened.sessionId,
- comment_id: reply1.comment_id as number,
- });
- assertSuccess(result, 'delete_comment (non-leaf)');
-
- const afterDelete = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(afterDelete, 'get_comments (after)');
- const comments = commentsList(afterDelete.comments);
- expect(comments).toHaveLength(1);
- expect(comments[0].author).toBe('Alice');
- expect(comments[0].replies).toHaveLength(0);
},
);
humanReadableTest.openspec('comment not found returns error')(
'Scenario: comment not found returns error',
async () => {
- const opened = await openSession(['Paragraph for not-found test.']);
+ const opened = await allureStep('Given a session with no matching comment', async () => {
+ return openSession(['Paragraph for not-found test.']);
+ });
+
+ const result = await allureStep('When deleting a non-existent comment ID', async () => {
+ return deleteComment(opened.mgr, {
+ session_id: opened.sessionId,
+ comment_id: 999999,
+ });
+ });
- const result = await deleteComment(opened.mgr, {
- session_id: opened.sessionId,
- comment_id: 999999,
+ await allureStep('Then COMMENT_NOT_FOUND error is returned', async () => {
+ assertFailure(result, 'COMMENT_NOT_FOUND', 'delete_comment');
});
- assertFailure(result, 'COMMENT_NOT_FOUND', 'delete_comment');
},
);
humanReadableTest.openspec('missing comment_id returns error')(
'Scenario: missing comment_id returns error',
async () => {
- const opened = await openSession(['Paragraph for missing param.']);
+ const opened = await allureStep('Given a session with no comment_id provided', async () => {
+ return openSession(['Paragraph for missing param.']);
+ });
+
+ const result = await allureStep('When delete_comment is called without comment_id', async () => {
+ return deleteComment(opened.mgr, {
+ session_id: opened.sessionId,
+ });
+ });
- const result = await deleteComment(opened.mgr, {
- session_id: opened.sessionId,
+ await allureStep('Then MISSING_PARAMETER error is returned', async () => {
+ assertFailure(result, 'MISSING_PARAMETER', 'delete_comment');
});
- assertFailure(result, 'MISSING_PARAMETER', 'delete_comment');
},
);
humanReadableTest.openspec('missing session context returns error')(
'Scenario: missing session context returns error',
async () => {
- const mgr = createTestSessionManager();
- const result = await deleteComment(mgr, { comment_id: 0 });
- assertFailure(result, 'MISSING_SESSION_CONTEXT', 'delete_comment');
+ const mgr = await allureStep('Given a session manager with no session', async () => {
+ return createTestSessionManager();
+ });
+
+ const result = await allureStep('When delete_comment is called without session context', async () => {
+ return deleteComment(mgr, { comment_id: 0 });
+ });
+
+ await allureStep('Then MISSING_SESSION_CONTEXT error is returned', async () => {
+ assertFailure(result, 'MISSING_SESSION_CONTEXT', 'delete_comment');
+ });
},
);
humanReadableTest.openspec('delete_comment supports session-or-file resolution')(
'Scenario: session-or-file resolution',
async () => {
- const opened = await openSession(['Paragraph for file resolution.']);
+ const { opened, added } = await allureStep('Given a session with a comment added', async () => {
+ const opened = await openSession(['Paragraph for file resolution.']);
+ const added = await addComment(opened.mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.firstParaId,
+ author: 'Tester',
+ text: 'File resolution comment.',
+ });
+ assertSuccess(added, 'add_comment');
+ return { opened, added };
+ });
- const added = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Tester',
- text: 'File resolution comment.',
+ const result = await allureStep('When delete_comment is called with file_path instead of session_id', async () => {
+ return deleteComment(opened.mgr, {
+ file_path: opened.inputPath,
+ comment_id: added.comment_id as number,
+ });
});
- assertSuccess(added, 'add_comment');
- const result = await deleteComment(opened.mgr, {
- file_path: opened.inputPath,
- comment_id: added.comment_id as number,
+ await allureStep('Then the comment is deleted and session_id is resolved', async () => {
+ assertSuccess(result, 'delete_comment (file_path)');
+ expect(result.session_id).toBeTruthy();
});
- assertSuccess(result, 'delete_comment (file_path)');
- expect(result.session_id).toBeTruthy();
},
);
diff --git a/packages/docx-mcp/src/tools/get_comments.test.ts b/packages/docx-mcp/src/tools/get_comments.test.ts
index 99c9957..8855d15 100644
--- a/packages/docx-mcp/src/tools/get_comments.test.ts
+++ b/packages/docx-mcp/src/tools/get_comments.test.ts
@@ -1,5 +1,5 @@
import { describe, expect } from 'vitest';
-import { testAllure, allureJsonAttachment } from '../testing/allure-test.js';
+import { testAllure, allureStep, allureJsonAttachment } from '../testing/allure-test.js';
import {
assertFailure,
assertSuccess,
@@ -24,174 +24,231 @@ describe('OpenSpec traceability: add-comment-read-tool', () => {
humanReadableTest.openspec('get_comments returns comment metadata and text')(
'Scenario: get_comments returns comment metadata and text',
async () => {
- const opened = await openSession([
- 'First paragraph with a comment.',
- 'Second paragraph with another comment.',
- ]);
+ const opened = await allureStep('Given a session with two commented paragraphs', async () => {
+ const o = await openSession([
+ 'First paragraph with a comment.',
+ 'Second paragraph with another comment.',
+ ]);
- await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.paraIds[0]!,
- author: 'Alice',
- text: 'Please review this clause.',
- initials: 'AL',
+ await addComment(o.mgr, {
+ session_id: o.sessionId,
+ target_paragraph_id: o.paraIds[0]!,
+ author: 'Alice',
+ text: 'Please review this clause.',
+ initials: 'AL',
+ });
+
+ await addComment(o.mgr, {
+ session_id: o.sessionId,
+ target_paragraph_id: o.paraIds[1]!,
+ author: 'Bob',
+ text: 'This needs clarification.',
+ initials: 'BO',
+ });
+
+ return o;
});
- await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.paraIds[1]!,
- author: 'Bob',
- text: 'This needs clarification.',
- initials: 'BO',
+ const result = await allureStep('When get_comments is called', async () => {
+ const r = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(r, 'get_comments');
+ await allureJsonAttachment('get_comments-response', r);
+ return r;
});
- const result = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(result, 'get_comments');
- await allureJsonAttachment('get_comments-response', result);
+ await allureStep('Then both comments are returned with correct metadata', async () => {
+ const comments = result.comments as Array<Record<string, unknown>>;
+ expect(comments).toHaveLength(2);
- const comments = result.comments as Array<Record<string, unknown>>;
- expect(comments).toHaveLength(2);
+ expect(comments[0]).toEqual(
+ expect.objectContaining({
+ id: expect.any(Number),
+ author: 'Alice',
+ initials: 'AL',
+ text: expect.stringContaining('Please review this clause.'),
+ }),
+ );
+ // anchored_paragraph_id is string | null depending on primitives resolution
+ expect(
+ comments[0]!.anchored_paragraph_id === null ||
+ typeof comments[0]!.anchored_paragraph_id === 'string',
+ ).toBe(true);
- expect(comments[0]).toEqual(
- expect.objectContaining({
- id: expect.any(Number),
- author: 'Alice',
- initials: 'AL',
- text: expect.stringContaining('Please review this clause.'),
- }),
- );
- // anchored_paragraph_id is string | null depending on primitives resolution
- expect(
- comments[0]!.anchored_paragraph_id === null ||
- typeof comments[0]!.anchored_paragraph_id === 'string',
- ).toBe(true);
-
- // date should be string or null
- expect(
- comments[0]!.date === null || typeof comments[0]!.date === 'string',
- ).toBe(true);
-
- expect(comments[1]).toEqual(
- expect.objectContaining({
- id: expect.any(Number),
- author: 'Bob',
- initials: 'BO',
- text: expect.stringContaining('This needs clarification.'),
- }),
- );
- expect(
- comments[1]!.anchored_paragraph_id === null ||
- typeof comments[1]!.anchored_paragraph_id === 'string',
- ).toBe(true);
+ // date should be string or null
+ expect(
+ comments[0]!.date === null || typeof comments[0]!.date === 'string',
+ ).toBe(true);
+
+ expect(comments[1]).toEqual(
+ expect.objectContaining({
+ id: expect.any(Number),
+ author: 'Bob',
+ initials: 'BO',
+ text: expect.stringContaining('This needs clarification.'),
+ }),
+ );
+ expect(
+ comments[1]!.anchored_paragraph_id === null ||
+ typeof comments[1]!.anchored_paragraph_id === 'string',
+ ).toBe(true);
- expect(result.session_id).toBe(opened.sessionId);
+ expect(result.session_id).toBe(opened.sessionId);
+ });
},
);
humanReadableTest.openspec('threaded replies are nested under parent comments')(
'Scenario: threaded replies are nested under parent comments',
async () => {
- const opened = await openSession(['Contract clause for discussion.']);
+ const { opened, root } = await allureStep('Given a root comment with a threaded reply', async () => {
+ const o = await openSession(['Contract clause for discussion.']);
- const root = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Alice',
- text: 'Is this clause enforceable?',
- });
- assertSuccess(root, 'add_comment (root)');
+ const r = await addComment(o.mgr, {
+ session_id: o.sessionId,
+ target_paragraph_id: o.firstParaId,
+ author: 'Alice',
+ text: 'Is this clause enforceable?',
+ });
+ assertSuccess(r, 'add_comment (root)');
+
+ const reply = await addComment(o.mgr, {
+ session_id: o.sessionId,
+ parent_comment_id: r.comment_id as number,
+ author: 'Bob',
+ text: 'Yes, per section 4.2.',
+ });
+ assertSuccess(reply, 'add_comment (reply)');
- const reply = await addComment(opened.mgr, {
- session_id: opened.sessionId,
- parent_comment_id: root.comment_id as number,
- author: 'Bob',
- text: 'Yes, per section 4.2.',
+ return { opened: o, root: r };
});
- assertSuccess(reply, 'add_comment (reply)');
- const result = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(result, 'get_comments');
- await allureJsonAttachment('get_comments-threaded', result);
+ const result = await allureStep('When get_comments is called', async () => {
+ const r = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(r, 'get_comments');
+ await allureJsonAttachment('get_comments-threaded', r);
+ return r;
+ });
- const comments = result.comments as Array<Record<string, unknown>>;
- expect(comments).toHaveLength(1);
+ await allureStep('Then the reply is nested under the root comment', async () => {
+ const comments = result.comments as Array<Record<string, unknown>>;
+ expect(comments).toHaveLength(1);
- const rootComment = comments[0]!;
- expect(rootComment.author).toBe('Alice');
+ const rootComment = comments[0]!;
+ expect(rootComment.author).toBe('Alice');
- const replies = rootComment.replies as Array<Record<string, unknown>>;
- expect(replies).toHaveLength(1);
- expect(replies[0]).toEqual(
- expect.objectContaining({
- id: expect.any(Number),
- author: 'Bob',
- text: expect.stringContaining('Yes, per section 4.2.'),
- }),
- );
+ const replies = rootComment.replies as Array<Record<string, unknown>>;
+ expect(replies).toHaveLength(1);
+ expect(replies[0]).toEqual(
+ expect.objectContaining({
+ id: expect.any(Number),
+ author: 'Bob',
+ text: expect.stringContaining('Yes, per section 4.2.'),
+ }),
+ );
+ });
},
);
humanReadableTest.openspec('document with no comments returns empty array')(
'Scenario: document with no comments returns empty array',
async () => {
- const opened = await openSession(['No comments in this document.']);
+ const opened = await allureStep('Given a session with no comments', async () => {
+ return await openSession(['No comments in this document.']);
+ });
+
+ const result = await allureStep('When get_comments is called', async () => {
+ const r = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(r, 'get_comments');
+ return r;
+ });
- const result = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(result, 'get_comments');
- expect(result.comments).toEqual([]);
+ await allureStep('Then an empty comments array is returned', async () => {
+ expect(result.comments).toEqual([]);
+ });
},
);
humanReadableTest.openspec('get_comments supports session-or-file resolution')(
'Scenario: get_comments supports session-or-file resolution',
async () => {
- const opened = await openSession(['File resolution paragraph.']);
+ const opened = await allureStep('Given a session with a comment added via session_id', async () => {
+ const o = await openSession(['File resolution paragraph.']);
+
+ await addComment(o.mgr, {
+ session_id: o.sessionId,
+ target_paragraph_id: o.firstParaId,
+ author: 'Tester',
+ text: 'File resolution comment.',
+ });
- await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Tester',
- text: 'File resolution comment.',
+ return o;
});
- const result = await getComments(opened.mgr, { file_path: opened.inputPath });
- assertSuccess(result, 'get_comments (file_path)');
- expect(result.session_id).toBeTruthy();
- const comments = result.comments as Array<Record<string, unknown>>;
- expect(comments).toHaveLength(1);
+ const result = await allureStep('When get_comments is called with file_path instead', async () => {
+ const r = await getComments(opened.mgr, { file_path: opened.inputPath });
+ assertSuccess(r, 'get_comments (file_path)');
+ return r;
+ });
+
+ await allureStep('Then the comment is returned with a resolved session_id', async () => {
+ expect(result.session_id).toBeTruthy();
+ const comments = result.comments as Array<Record<string, unknown>>;
+ expect(comments).toHaveLength(1);
+ });
},
);
humanReadableTest.openspec('missing session context returns error')(
'Scenario: missing session context returns error',
async () => {
- const mgr = createTestSessionManager();
- const result = await getComments(mgr, {});
- assertFailure(result, 'MISSING_SESSION_CONTEXT', 'get_comments');
+ const mgr = await allureStep('Given a session manager with no context provided', async () => {
+ return createTestSessionManager();
+ });
+
+ const result = await allureStep('When get_comments is called without session_id or file_path', async () => {
+ return await getComments(mgr, {});
+ });
+
+ await allureStep('Then a MISSING_SESSION_CONTEXT error is returned', async () => {
+ assertFailure(result, 'MISSING_SESSION_CONTEXT', 'get_comments');
+ });
},
);
humanReadableTest.openspec('get_comments does not mutate session state')(
'Scenario: get_comments does not mutate session state',
async () => {
- const opened = await openSession(['Immutability check paragraph.']);
+ const { opened, revisionBefore, editCountBefore } = await allureStep(
+ 'Given a session with a comment and recorded revision state',
+ async () => {
+ const o = await openSession(['Immutability check paragraph.']);
- await addComment(opened.mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.firstParaId,
- author: 'Tester',
- text: 'Mutation guard comment.',
- });
+ await addComment(o.mgr, {
+ session_id: o.sessionId,
+ target_paragraph_id: o.firstParaId,
+ author: 'Tester',
+ text: 'Mutation guard comment.',
+ });
- const session = opened.mgr.getSession(opened.sessionId);
- const revisionBefore = session.editRevision;
- const editCountBefore = session.editCount;
+ const session = o.mgr.getSession(o.sessionId);
+ return {
+ opened: o,
+ revisionBefore: session.editRevision,
+ editCountBefore: session.editCount,
+ };
+ },
+ );
- const result = await getComments(opened.mgr, { session_id: opened.sessionId });
- assertSuccess(result, 'get_comments');
+ await allureStep('When get_comments is called', async () => {
+ const r = await getComments(opened.mgr, { session_id: opened.sessionId });
+ assertSuccess(r, 'get_comments');
+ });
- expect(session.editRevision).toBe(revisionBefore);
- expect(session.editCount).toBe(editCountBefore);
+ await allureStep('Then session revision and edit count are unchanged', async () => {
+ const session = opened.mgr.getSession(opened.sessionId);
+ expect(session.editRevision).toBe(revisionBefore);
+ expect(session.editCount).toBe(editCountBefore);
+ });
},
);
diff --git a/packages/docx-mcp/src/tools/update_safe_docx_save_defaults_and_stable_node_ids.test.ts b/packages/docx-mcp/src/tools/update_safe_docx_save_defaults_and_stable_node_ids.test.ts
index eb3e32e..de78e03 100644
--- a/packages/docx-mcp/src/tools/update_safe_docx_save_defaults_and_stable_node_ids.test.ts
+++ b/packages/docx-mcp/src/tools/update_safe_docx_save_defaults_and_stable_node_ids.test.ts
@@ -14,7 +14,7 @@ import {
makeDocxWithDocumentXml,
makeMinimalDocx,
} from '../testing/docx_test_utils.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import {
openSession,
assertSuccess,
@@ -46,344 +46,415 @@ describe('Traceability: Save Defaults and Stable Node IDs', () => {
registerCleanup();
humanReadableTest.openspec('Re-opening unchanged document yields same IDs')('Scenario: Re-opening unchanged document yields same IDs', async () => {
- const tmpDir = await createTrackedTempDir('safe-docx-id-reopen-');
- const inputPath = path.join(tmpDir, 'input.docx');
- await fs.writeFile(inputPath, new Uint8Array(await makeMinimalDocx(['A', 'B', 'C'])));
-
- const mgr1 = createTestSessionManager();
- const mgr2 = createTestSessionManager();
-
- const openedA = await openDocument(mgr1, { file_path: inputPath });
- assertSuccess(openedA, 'open A');
- const readA = await readFile(mgr1, { session_id: openedA.session_id as string, format: 'simple' });
- assertSuccess(readA, 'read A');
- const idsA = extractParaIdsFromToon(String(readA.content));
-
- const openedB = await openDocument(mgr2, { file_path: inputPath });
- assertSuccess(openedB, 'open B');
- const readB = await readFile(mgr2, { session_id: openedB.session_id as string, format: 'simple' });
- assertSuccess(readB, 'read B');
- const idsB = extractParaIdsFromToon(String(readB.content));
-
- expect(idsA).toEqual(idsB);
+ const { idsA, idsB } = await allureStep('Given the same docx opened in two independent sessions', async () => {
+ const tmpDir = await createTrackedTempDir('safe-docx-id-reopen-');
+ const inputPath = path.join(tmpDir, 'input.docx');
+ await fs.writeFile(inputPath, new Uint8Array(await makeMinimalDocx(['A', 'B', 'C'])));
+
+ const mgr1 = createTestSessionManager();
+ const mgr2 = createTestSessionManager();
+
+ const openedA = await openDocument(mgr1, { file_path: inputPath });
+ assertSuccess(openedA, 'open A');
+ const readA = await readFile(mgr1, { session_id: openedA.session_id as string, format: 'simple' });
+ assertSuccess(readA, 'read A');
+ const idsA = extractParaIdsFromToon(String(readA.content));
+
+ const openedB = await openDocument(mgr2, { file_path: inputPath });
+ assertSuccess(openedB, 'open B');
+ const readB = await readFile(mgr2, { session_id: openedB.session_id as string, format: 'simple' });
+ assertSuccess(readB, 'read B');
+ const idsB = extractParaIdsFromToon(String(readB.content));
+
+ return { idsA, idsB };
+ });
+
+ await allureStep('Then both sessions report identical paragraph IDs', () => {
+ expect(idsA).toEqual(idsB);
+ });
});
humanReadableTest.openspec('Inserting new paragraph does not renumber unrelated IDs')('Scenario: Inserting new paragraph does not renumber unrelated IDs', async () => {
- const mgr = createTestSessionManager();
- const opened = await openSession(['One', 'Two', 'Three'], { mgr });
- const [id1, id2, id3] = opened.paraIds;
-
- const inserted = await insertParagraph(mgr, {
- session_id: opened.sessionId,
- positional_anchor_node_id: id2!,
- new_string: 'Two and a half',
- instruction: 'insert without renumber',
- position: 'AFTER',
+ const { id1, id2, id3, afterIds, newParaId } = await allureStep('Given a 3-paragraph doc with a new paragraph inserted after the second', async () => {
+ const mgr = createTestSessionManager();
+ const opened = await openSession(['One', 'Two', 'Three'], { mgr });
+ const [id1, id2, id3] = opened.paraIds;
+
+ const inserted = await insertParagraph(mgr, {
+ session_id: opened.sessionId,
+ positional_anchor_node_id: id2!,
+ new_string: 'Two and a half',
+ instruction: 'insert without renumber',
+ position: 'AFTER',
+ });
+ assertSuccess(inserted, 'insert');
+
+ const after = await readFile(mgr, { session_id: opened.sessionId, format: 'simple' });
+ assertSuccess(after, 'read after');
+ const afterIds = extractParaIdsFromToon(String(after.content));
+
+ return { id1: id1!, id2: id2!, id3: id3!, afterIds, newParaId: inserted.new_paragraph_id as string };
+ });
+
+ await allureStep('Then original IDs are preserved and the new paragraph has its own ID', () => {
+ expect(afterIds).toContain(id1);
+ expect(afterIds).toContain(id2);
+ expect(afterIds).toContain(id3);
+ expect(afterIds).toContain(newParaId);
+ expect(afterIds.indexOf(id1)).toBe(0);
+ expect(afterIds.indexOf(id2)).toBe(1);
+ expect(afterIds.indexOf(id3)).toBe(3);
});
- assertSuccess(inserted, 'insert');
-
- const after = await readFile(mgr, { session_id: opened.sessionId, format: 'simple' });
- assertSuccess(after, 'read after');
- const afterIds = extractParaIdsFromToon(String(after.content));
-
- expect(afterIds).toContain(id1!);
- expect(afterIds).toContain(id2!);
- expect(afterIds).toContain(id3!);
- expect(afterIds).toContain(inserted.new_paragraph_id as string);
- expect(afterIds.indexOf(id1!)).toBe(0);
- expect(afterIds.indexOf(id2!)).toBe(1);
- expect(afterIds.indexOf(id3!)).toBe(3);
});
humanReadableTest.openspec('Two identical signature-block paragraphs remain uniquely addressable')('Scenario: Two identical signature-block paragraphs remain uniquely addressable', async () => {
- const mgr = createTestSessionManager();
- const sig = 'Supplier / By: / Name: / Title:';
- const opened = await openSession([sig, sig], { mgr });
- expect(opened.paraIds.length).toBe(2);
- expect(opened.paraIds[0]).not.toBe(opened.paraIds[1]);
-
- const edited = await replaceText(mgr, {
- session_id: opened.sessionId,
- target_paragraph_id: opened.paraIds[0]!,
- old_string: 'Supplier',
- new_string: 'Vendor',
- instruction: 'unique duplicate targeting',
+ const { readFirst, readSecond } = await allureStep('Given two identical paragraphs with an edit applied to only the first', async () => {
+ const mgr = createTestSessionManager();
+ const sig = 'Supplier / By: / Name: / Title:';
+ const opened = await openSession([sig, sig], { mgr });
+ expect(opened.paraIds.length).toBe(2);
+ expect(opened.paraIds[0]).not.toBe(opened.paraIds[1]);
+
+ const edited = await replaceText(mgr, {
+ session_id: opened.sessionId,
+ target_paragraph_id: opened.paraIds[0]!,
+ old_string: 'Supplier',
+ new_string: 'Vendor',
+ instruction: 'unique duplicate targeting',
+ });
+ assertSuccess(edited, 'edit');
+
+ const readFirst = await readFile(mgr, { session_id: opened.sessionId, node_ids: [opened.paraIds[0]!], format: 'simple' });
+ const readSecond = await readFile(mgr, { session_id: opened.sessionId, node_ids: [opened.paraIds[1]!], format: 'simple' });
+ assertSuccess(readFirst, 'read first');
+ assertSuccess(readSecond, 'read second');
+ return { readFirst, readSecond };
+ });
+
+ await allureStep('Then only the targeted paragraph reflects the edit', () => {
+ expect(String(readFirst.content)).toContain('Vendor');
+ expect(String(readSecond.content)).toContain('Supplier');
});
- assertSuccess(edited, 'edit');
-
- const readFirst = await readFile(mgr, { session_id: opened.sessionId, node_ids: [opened.paraIds[0]!], format: 'simple' });
- const readSecond = await readFile(mgr, { session_id: opened.sessionId, node_ids: [opened.paraIds[1]!], format: 'simple' });
- assertSuccess(readFirst, 'read first');
- assertSuccess(readSecond, 'read second');
- expect(String(readFirst.content)).toContain('Vendor');
- expect(String(readSecond.content)).toContain('Supplier');
});
humanReadableTest.openspec('Missing intrinsic IDs are backfilled once')('Scenario: Missing intrinsic IDs are backfilled once', async () => {
- const xml =
- `` +
- `` +
- `` +
- `` +
- `Existing id paragraph` +
- `` +
- `Needs backfill` +
- ``;
-
- const mgr = createTestSessionManager();
- const tmpDir = await createTrackedTempDir('safe-docx-backfill-');
- const inputPath = path.join(tmpDir, 'input.docx');
- await fs.writeFile(inputPath, new Uint8Array(await makeDocxWithDocumentXml(xml)));
-
- const opened = await openDocument(mgr, { file_path: inputPath });
- assertSuccess(opened, 'open');
- const sessionId = opened.session_id as string;
-
- const read1 = await readFile(mgr, { session_id: sessionId, format: 'simple' });
- const read2 = await readFile(mgr, { session_id: sessionId, format: 'simple' });
- assertSuccess(read1, 'read1');
- assertSuccess(read2, 'read2');
-
- const ids1 = extractParaIdsFromToon(String(read1.content));
- const ids2 = extractParaIdsFromToon(String(read2.content));
- expect(ids1).toContain('_bk_existing');
- expect(ids1.length).toBe(2);
- expect(ids1[1]).toMatch(/^_bk_[0-9a-f]{12}$/);
- expect(ids2).toEqual(ids1);
+ const { ids1, ids2 } = await allureStep('Given a docx with one existing and one missing bookmark ID read twice', async () => {
+ const xml =
+ `` +
+ `` +
+ `` +
+ `` +
+ `Existing id paragraph` +
+ `` +
+ `Needs backfill` +
+ ``;
+
+ const mgr = createTestSessionManager();
+ const tmpDir = await createTrackedTempDir('safe-docx-backfill-');
+ const inputPath = path.join(tmpDir, 'input.docx');
+ await fs.writeFile(inputPath, new Uint8Array(await makeDocxWithDocumentXml(xml)));
+
+ const opened = await openDocument(mgr, { file_path: inputPath });
+ assertSuccess(opened, 'open');
+ const sessionId = opened.session_id as string;
+
+ const read1 = await readFile(mgr, { session_id: sessionId, format: 'simple' });
+ const read2 = await readFile(mgr, { session_id: sessionId, format: 'simple' });
+ assertSuccess(read1, 'read1');
+ assertSuccess(read2, 'read2');
+
+ const ids1 = extractParaIdsFromToon(String(read1.content));
+ const ids2 = extractParaIdsFromToon(String(read2.content));
+ return { ids1, ids2 };
+ });
+
+ await allureStep('Then the existing ID is preserved, the missing one is backfilled, and both reads agree', () => {
+ expect(ids1).toContain('_bk_existing');
+ expect(ids1.length).toBe(2);
+ expect(ids1[1]).toMatch(/^_bk_[0-9a-f]{12}$/);
+ expect(ids2).toEqual(ids1);
+ });
});
humanReadableTest.openspec('Default download returns both variants')('Scenario: Default download returns both variants', async () => {
- const mgr = createTestSessionManager();
- const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
- const tmpDir = await createTrackedTempDir('safe-docx-download-both-');
- const cleanPath = path.join(tmpDir, 'output.clean.docx');
-
- const opened = await openDocument(mgr, { file_path: fixturePath });
- assertSuccess(opened, 'open');
- const sessionId = opened.session_id as string;
-
- const read = await readFile(mgr, { session_id: sessionId });
- assertSuccess(read, 'read');
- const paraId = firstParaIdFromToon(String(read.content));
- await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'The',
- new_string: 'TheX',
- instruction: 'edit before dual download',
+ const saved = await allureStep('Given an edited document saved with default options', async () => {
+ const mgr = createTestSessionManager();
+ const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
+ const tmpDir = await createTrackedTempDir('safe-docx-download-both-');
+ const cleanPath = path.join(tmpDir, 'output.clean.docx');
+
+ const opened = await openDocument(mgr, { file_path: fixturePath });
+ assertSuccess(opened, 'open');
+ const sessionId = opened.session_id as string;
+
+ const read = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(read, 'read');
+ const paraId = firstParaIdFromToon(String(read.content));
+ await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'The',
+ new_string: 'TheX',
+ instruction: 'edit before dual download',
+ });
+
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ clean_bookmarks: true,
+ });
+ assertSuccess(saved, 'save');
+ return saved;
});
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- clean_bookmarks: true,
+ await allureStep('Then both clean and redline variants are returned', () => {
+ expect(saved.save_format).toBe('both');
+ expect(saved.returned_variants).toEqual(['clean', 'redline']);
+ expect(saved.cache_hit).toBe(false);
});
- assertSuccess(saved, 'save');
- expect(saved.save_format).toBe('both');
- expect(saved.returned_variants).toEqual(['clean', 'redline']);
- expect(saved.cache_hit).toBe(false);
});
humanReadableTest.openspec('Explicit variant override returns subset')('Scenario: Explicit variant override returns subset', async () => {
- const mgr = createTestSessionManager();
- const opened = await openSession(['Hello world'], { mgr, prefix: 'safe-docx-download-clean-only-' });
- const outputPath = path.join(opened.tmpDir, 'out.docx');
-
- const saved = await save(mgr, {
- session_id: opened.sessionId,
- save_to_local_path: outputPath,
- save_format: 'clean',
- clean_bookmarks: true,
+ const saved = await allureStep('Given a document saved with explicit clean-only format', async () => {
+ const mgr = createTestSessionManager();
+ const opened = await openSession(['Hello world'], { mgr, prefix: 'safe-docx-download-clean-only-' });
+ const outputPath = path.join(opened.tmpDir, 'out.docx');
+
+ const saved = await save(mgr, {
+ session_id: opened.sessionId,
+ save_to_local_path: outputPath,
+ save_format: 'clean',
+ clean_bookmarks: true,
+ });
+ assertSuccess(saved, 'save');
+ return saved;
+ });
+
+ await allureStep('Then only the clean variant is returned with no tracked path', () => {
+ expect(saved.returned_variants).toEqual(['clean']);
+ expect(saved.tracked_saved_to).toBeNull();
});
- assertSuccess(saved, 'save');
- expect(saved.returned_variants).toEqual(['clean']);
- expect(saved.tracked_saved_to).toBeNull();
});
humanReadableTest.openspec('Repeat download reuses cached artifacts')('Scenario: Repeat download reuses cached artifacts', async () => {
- const mgr = createTestSessionManager();
- const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
- const tmpDir = await createTrackedTempDir('safe-docx-cache-hit-');
- const cleanPath = path.join(tmpDir, 'output.clean.docx');
-
- const opened = await openDocument(mgr, { file_path: fixturePath });
- assertSuccess(opened, 'open');
- const sessionId = opened.session_id as string;
-
- const read = await readFile(mgr, { session_id: sessionId });
- assertSuccess(read, 'read');
- const paraId = firstParaIdFromToon(String(read.content));
- await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'The',
- new_string: 'TheX',
- instruction: 'cache test edit',
+ const { first, second } = await allureStep('Given an edited document saved twice without changes in between', async () => {
+ const mgr = createTestSessionManager();
+ const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
+ const tmpDir = await createTrackedTempDir('safe-docx-cache-hit-');
+ const cleanPath = path.join(tmpDir, 'output.clean.docx');
+
+ const opened = await openDocument(mgr, { file_path: fixturePath });
+ assertSuccess(opened, 'open');
+ const sessionId = opened.session_id as string;
+
+ const read = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(read, 'read');
+ const paraId = firstParaIdFromToon(String(read.content));
+ await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'The',
+ new_string: 'TheX',
+ instruction: 'cache test edit',
+ });
+
+ const first = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ clean_bookmarks: true,
+ });
+ const second = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ clean_bookmarks: true,
+ });
+ assertSuccess(first, 'first download');
+ assertSuccess(second, 'second download');
+ return { first, second };
});
- const first = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- clean_bookmarks: true,
+ await allureStep('Then the first save is a cache miss and the second is a cache hit', () => {
+ expect(first.cache_hit).toBe(false);
+ expect(second.cache_hit).toBe(true);
});
- const second = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- clean_bookmarks: true,
- });
- assertSuccess(first, 'first download');
- assertSuccess(second, 'second download');
- expect(first.cache_hit).toBe(false);
- expect(second.cache_hit).toBe(true);
});
humanReadableTest.openspec('New edit invalidates previous revision cache')('Scenario: New edit invalidates previous revision cache', async () => {
- const mgr = createTestSessionManager();
- const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
- const tmpDir = await createTrackedTempDir('safe-docx-cache-invalidate-');
- const cleanPath = path.join(tmpDir, 'output.clean.docx');
-
- const opened = await openDocument(mgr, { file_path: fixturePath });
- assertSuccess(opened, 'open');
- const sessionId = opened.session_id as string;
-
- const read = await readFile(mgr, { session_id: sessionId });
- assertSuccess(read, 'read');
- const paraId = firstParaIdFromToon(String(read.content));
-
- await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'The',
- new_string: 'TheX',
- instruction: 'first revision',
- });
- const first = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- clean_bookmarks: true,
- });
- assertSuccess(first, 'first download');
- expect(first.edit_revision).toBe(1);
- expect(first.cache_hit).toBe(false);
-
- await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'TheX',
- new_string: 'TheXY',
- instruction: 'second revision',
+ const { first, second } = await allureStep('Given a document edited and saved twice at different revisions', async () => {
+ const mgr = createTestSessionManager();
+ const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
+ const tmpDir = await createTrackedTempDir('safe-docx-cache-invalidate-');
+ const cleanPath = path.join(tmpDir, 'output.clean.docx');
+
+ const opened = await openDocument(mgr, { file_path: fixturePath });
+ assertSuccess(opened, 'open');
+ const sessionId = opened.session_id as string;
+
+ const read = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(read, 'read');
+ const paraId = firstParaIdFromToon(String(read.content));
+
+ await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'The',
+ new_string: 'TheX',
+ instruction: 'first revision',
+ });
+ const first = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ clean_bookmarks: true,
+ });
+ assertSuccess(first, 'first download');
+
+ await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'TheX',
+ new_string: 'TheXY',
+ instruction: 'second revision',
+ });
+ const second = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ clean_bookmarks: true,
+ });
+ assertSuccess(second, 'second download');
+ return { first, second };
});
- const second = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- clean_bookmarks: true,
+
+ await allureStep('Then each save is a cache miss with an incremented revision number', () => {
+ expect(first.edit_revision).toBe(1);
+ expect(first.cache_hit).toBe(false);
+ expect(second.edit_revision).toBe(2);
+ expect(second.cache_hit).toBe(false);
});
- assertSuccess(second, 'second download');
- expect(second.edit_revision).toBe(2);
- expect(second.cache_hit).toBe(false);
});
humanReadableTest.openspec('Anchors unchanged after dual download')('Scenario: Anchors unchanged after dual download', async () => {
- const mgr = createTestSessionManager();
- const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
- const tmpDir = await createTrackedTempDir('safe-docx-anchor-stable-');
- const cleanPath = path.join(tmpDir, 'output.clean.docx');
-
- const opened = await openDocument(mgr, { file_path: fixturePath });
- assertSuccess(opened, 'open');
- const sessionId = opened.session_id as string;
- const readResult = await readFile(mgr, { session_id: sessionId, format: 'simple' });
- assertSuccess(readResult, 'read');
- const beforeIds = extractParaIdsFromToon(String(readResult.content)).slice();
- expect(beforeIds.length).toBeGreaterThan(0);
-
- const saved = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- clean_bookmarks: true,
+ const { beforeIds, afterIds } = await allureStep('Given paragraph IDs read before and after a save', async () => {
+ const mgr = createTestSessionManager();
+ const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
+ const tmpDir = await createTrackedTempDir('safe-docx-anchor-stable-');
+ const cleanPath = path.join(tmpDir, 'output.clean.docx');
+
+ const opened = await openDocument(mgr, { file_path: fixturePath });
+ assertSuccess(opened, 'open');
+ const sessionId = opened.session_id as string;
+ const readResult = await readFile(mgr, { session_id: sessionId, format: 'simple' });
+ assertSuccess(readResult, 'read');
+ const beforeIds = extractParaIdsFromToon(String(readResult.content)).slice();
+ expect(beforeIds.length).toBeGreaterThan(0);
+
+ const saved = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ clean_bookmarks: true,
+ });
+ assertSuccess(saved, 'save');
+
+ const afterRead = await readFile(mgr, { session_id: sessionId, format: 'simple' });
+ assertSuccess(afterRead, 'read after');
+ const afterIds = extractParaIdsFromToon(String(afterRead.content));
+ return { beforeIds, afterIds };
});
- assertSuccess(saved, 'save');
- const afterRead = await readFile(mgr, { session_id: sessionId, format: 'simple' });
- assertSuccess(afterRead, 'read after');
- const afterIds = extractParaIdsFromToon(String(afterRead.content));
- expect(afterIds).toEqual(beforeIds);
+ await allureStep('Then paragraph IDs are identical before and after the save', () => {
+ expect(afterIds).toEqual(beforeIds);
+ });
});
humanReadableTest.openspec('Generating clean artifact does not invalidate redline anchors')('Scenario: Generating clean artifact does not invalidate redline anchors', async () => {
- const mgr = createTestSessionManager();
- const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
- const tmpDir = await createTrackedTempDir('safe-docx-clean-then-redline-');
- const cleanPath = path.join(tmpDir, 'clean.docx');
- const trackedPath = path.join(tmpDir, 'redline.docx');
-
- const opened = await openDocument(mgr, { file_path: fixturePath });
- assertSuccess(opened, 'open');
- const sessionId = opened.session_id as string;
- const readResult = await readFile(mgr, { session_id: sessionId, format: 'simple' });
- assertSuccess(readResult, 'read');
- const baselineIds = extractParaIdsFromToon(String(readResult.content)).slice();
-
- const cleanOnly = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: cleanPath,
- save_format: 'clean',
- clean_bookmarks: true,
+ const { baselineIds, afterIds } = await allureStep('Given baseline IDs captured, then clean and tracked saves performed', async () => {
+ const mgr = createTestSessionManager();
+ const fixturePath = SIMPLE_WORD_CHANGE_FIXTURE;
+ const tmpDir = await createTrackedTempDir('safe-docx-clean-then-redline-');
+ const cleanPath = path.join(tmpDir, 'clean.docx');
+ const trackedPath = path.join(tmpDir, 'redline.docx');
+
+ const opened = await openDocument(mgr, { file_path: fixturePath });
+ assertSuccess(opened, 'open');
+ const sessionId = opened.session_id as string;
+ const readResult = await readFile(mgr, { session_id: sessionId, format: 'simple' });
+ assertSuccess(readResult, 'read');
+ const baselineIds = extractParaIdsFromToon(String(readResult.content)).slice();
+
+ const cleanOnly = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: cleanPath,
+ save_format: 'clean',
+ clean_bookmarks: true,
+ });
+ assertSuccess(cleanOnly, 'clean download');
+
+ const trackedOnly = await save(mgr, {
+ session_id: sessionId,
+ save_to_local_path: trackedPath,
+ save_format: 'tracked',
+ clean_bookmarks: true,
+ });
+ assertSuccess(trackedOnly, 'tracked download');
+
+ const afterRead = await readFile(mgr, { session_id: sessionId, format: 'simple' });
+ assertSuccess(afterRead, 'read after');
+ const afterIds = extractParaIdsFromToon(String(afterRead.content));
+ return { baselineIds, afterIds };
});
- assertSuccess(cleanOnly, 'clean download');
- const trackedOnly = await save(mgr, {
- session_id: sessionId,
- save_to_local_path: trackedPath,
- save_format: 'tracked',
- clean_bookmarks: true,
+ await allureStep('Then paragraph IDs remain unchanged after both saves', () => {
+ expect(afterIds).toEqual(baselineIds);
});
- assertSuccess(trackedOnly, 'tracked download');
-
- const afterRead = await readFile(mgr, { session_id: sessionId, format: 'simple' });
- assertSuccess(afterRead, 'read after');
- expect(extractParaIdsFromToon(String(afterRead.content))).toEqual(baselineIds);
});
humanReadableTest.openspec('Open response advertises download defaults')('Scenario: Open response advertises download defaults', async () => {
- const mgr = createTestSessionManager();
- const opened = await openSession(['A'], { mgr, prefix: 'safe-docx-open-metadata-' });
-
- const openResult = await openDocument(mgr, { file_path: opened.inputPath });
- assertSuccess(openResult, 'open');
- const defaultsMeta = openResult as typeof openResult & OpenSaveDefaults;
- expect(defaultsMeta.save_defaults?.default_variants).toEqual(['clean', 'redline']);
- expect(defaultsMeta.save_defaults?.default_save_format).toBe('both');
- expect(defaultsMeta.save_defaults?.supports_variant_override).toBe(true);
+ const defaultsMeta = await allureStep('Given a document opened to inspect its metadata', async () => {
+ const mgr = createTestSessionManager();
+ const opened = await openSession(['A'], { mgr, prefix: 'safe-docx-open-metadata-' });
+
+ const openResult = await openDocument(mgr, { file_path: opened.inputPath });
+ assertSuccess(openResult, 'open');
+ return openResult as typeof openResult & OpenSaveDefaults;
+ });
+
+ await allureStep('Then save_defaults advertises both variants with override support', () => {
+ expect(defaultsMeta.save_defaults?.default_variants).toEqual(['clean', 'redline']);
+ expect(defaultsMeta.save_defaults?.default_save_format).toBe('both');
+ expect(defaultsMeta.save_defaults?.supports_variant_override).toBe(true);
+ });
});
humanReadableTest.openspec('Download response reports variant and cache details')('Scenario: Download response reports variant and cache details', async () => {
- const mgr = createTestSessionManager();
- const opened = await openSession(['A'], { mgr, prefix: 'safe-docx-download-metadata-' });
- const outputPath = path.join(opened.tmpDir, 'out.docx');
-
- const first = await save(mgr, {
- session_id: opened.sessionId,
- save_to_local_path: outputPath,
- save_format: 'clean',
- clean_bookmarks: true,
- });
- const second = await save(mgr, {
- session_id: opened.sessionId,
- save_to_local_path: outputPath,
- save_format: 'clean',
- clean_bookmarks: true,
+ const { first, second } = await allureStep('Given a document saved twice in clean format', async () => {
+ const mgr = createTestSessionManager();
+ const opened = await openSession(['A'], { mgr, prefix: 'safe-docx-download-metadata-' });
+ const outputPath = path.join(opened.tmpDir, 'out.docx');
+
+ const first = await save(mgr, {
+ session_id: opened.sessionId,
+ save_to_local_path: outputPath,
+ save_format: 'clean',
+ clean_bookmarks: true,
+ });
+ const second = await save(mgr, {
+ session_id: opened.sessionId,
+ save_to_local_path: outputPath,
+ save_format: 'clean',
+ clean_bookmarks: true,
+ });
+ assertSuccess(first, 'first download');
+ assertSuccess(second, 'second download');
+ return { first, second };
});
- assertSuccess(first, 'first download');
- assertSuccess(second, 'second download');
- expect(first.returned_variants).toEqual(['clean']);
- expect(first.cache_hit).toBe(false);
- expect(typeof first.edit_revision).toBe('number');
+ await allureStep('Then both responses report variant and cache metadata correctly', () => {
+ expect(first.returned_variants).toEqual(['clean']);
+ expect(first.cache_hit).toBe(false);
+ expect(typeof first.edit_revision).toBe('number');
- expect(second.returned_variants).toEqual(['clean']);
- expect(second.cache_hit).toBe(true);
- expect(second.edit_revision).toBe(first.edit_revision);
+ expect(second.returned_variants).toEqual(['clean']);
+ expect(second.cache_hit).toBe(true);
+ expect(second.edit_revision).toBe(first.edit_revision);
+ });
});
});
diff --git a/packages/docx-mcp/src/tools/update_safe_docx_sessionless_entry_and_session_controls.test.ts b/packages/docx-mcp/src/tools/update_safe_docx_sessionless_entry_and_session_controls.test.ts
index 7a3c93f..5478108 100644
--- a/packages/docx-mcp/src/tools/update_safe_docx_sessionless_entry_and_session_controls.test.ts
+++ b/packages/docx-mcp/src/tools/update_safe_docx_sessionless_entry_and_session_controls.test.ts
@@ -12,7 +12,7 @@ import { save } from './save.js';
import { getSessionStatus } from './get_session_status.js';
import { clearSession } from './clear_session.js';
import { firstParaIdFromToon, makeMinimalDocx } from '../testing/docx_test_utils.js';
-import { testAllure } from '../testing/allure-test.js';
+import { testAllure, allureStep } from '../testing/allure-test.js';
import { assertSuccess, assertFailure, registerCleanup, createTrackedTempDir, createTestSessionManager } from '../testing/session-test-utils.js';
const TEST_FEATURE = 'update-safe-docx-sessionless-entry-and-session-controls';
@@ -38,175 +38,247 @@ describe('Traceability: Sessionless Entry and Session Controls', () => {
});
humanReadableTest.openspec('document tools accept file-first entry without pre-open')('Scenario: document tools accept file-first entry without pre-open', async () => {
- const mgr = createTestSessionManager();
- const inputPath = await createDoc(['Alpha clause']);
- const outputPath = path.join(path.dirname(inputPath), 'out.docx');
-
- const read = await readFile(mgr, { file_path: inputPath, format: 'simple' });
- assertSuccess(read, 'read');
- expect(read.session_resolution).toBe('opened_new_session');
- const paraId = firstParaIdFromToon(String(read.content));
-
- const searched = await grep(mgr, { file_path: inputPath, patterns: ['Alpha'] });
- assertSuccess(searched, 'grep');
-
- const edited = await replaceText(mgr, {
- file_path: inputPath,
- target_paragraph_id: paraId,
- old_string: 'Alpha',
- new_string: 'Beta',
- instruction: 'file-first edit',
- });
- expect(edited.success).toBe(true);
-
- const inserted = await insertParagraph(mgr, {
- file_path: inputPath,
- positional_anchor_node_id: paraId,
- new_string: 'Inserted line',
- instruction: 'file-first insert',
- position: 'AFTER',
- });
- expect(inserted.success).toBe(true);
-
- const saved = await save(mgr, {
- file_path: inputPath,
- save_to_local_path: outputPath,
- save_format: 'clean',
- });
- expect(saved.success).toBe(true);
-
- const status = await getSessionStatus(mgr, { file_path: inputPath });
- assertSuccess(status, 'status');
- expect(status.session_id).toMatch(/^ses_[A-Za-z0-9]{12}$/);
+ const { mgr, inputPath, outputPath } = await allureStep('Given a doc with one paragraph and no pre-opened session', async () => {
+ const mgr = createTestSessionManager();
+ const inputPath = await createDoc(['Alpha clause']);
+ const outputPath = path.join(path.dirname(inputPath), 'out.docx');
+ return { mgr, inputPath, outputPath };
+ });
+
+ const paraId = await allureStep('When read_file is called with file_path (no session)', async () => {
+ const read = await readFile(mgr, { file_path: inputPath, format: 'simple' });
+ assertSuccess(read, 'read');
+ expect(read.session_resolution).toBe('opened_new_session');
+ return firstParaIdFromToon(String(read.content));
+ });
+
+ await allureStep('When grep, replace_text, insert_paragraph, and save are called via file_path', async () => {
+ const searched = await grep(mgr, { file_path: inputPath, patterns: ['Alpha'] });
+ assertSuccess(searched, 'grep');
+
+ const edited = await replaceText(mgr, {
+ file_path: inputPath,
+ target_paragraph_id: paraId,
+ old_string: 'Alpha',
+ new_string: 'Beta',
+ instruction: 'file-first edit',
+ });
+ expect(edited.success).toBe(true);
+
+ const inserted = await insertParagraph(mgr, {
+ file_path: inputPath,
+ positional_anchor_node_id: paraId,
+ new_string: 'Inserted line',
+ instruction: 'file-first insert',
+ position: 'AFTER',
+ });
+ expect(inserted.success).toBe(true);
+
+ const saved = await save(mgr, {
+ file_path: inputPath,
+ save_to_local_path: outputPath,
+ save_format: 'clean',
+ });
+ expect(saved.success).toBe(true);
+ });
+
+ await allureStep('Then get_session_status returns a valid session id', async () => {
+ const status = await getSessionStatus(mgr, { file_path: inputPath });
+ assertSuccess(status, 'status');
+ expect(status.session_id).toMatch(/^ses_[A-Za-z0-9]{12}$/);
+ });
});
humanReadableTest.openspec('reuse policy selects most-recently-used session')('Scenario: reuse policy selects most-recently-used session', async () => {
- const mgr = createTestSessionManager();
- const inputPath = await createDoc(['Reuse policy text']);
-
- const first = await openDocument(mgr, { file_path: inputPath });
- const second = await openDocument(mgr, { file_path: inputPath });
- assertSuccess(first, 'open first');
- assertSuccess(second, 'open second');
-
- await getSessionStatus(mgr, { session_id: String(first.session_id) });
- const reused = await readFile(mgr, { file_path: inputPath, format: 'simple' });
- assertSuccess(reused, 'read');
- expect(reused.session_resolution).toBe('reused_existing_session');
- expect(reused.resolved_session_id).toBe(first.session_id);
+ const { mgr, inputPath, first } = await allureStep('Given two sessions opened for the same file', async () => {
+ const mgr = createTestSessionManager();
+ const inputPath = await createDoc(['Reuse policy text']);
+ const first = await openDocument(mgr, { file_path: inputPath });
+ const second = await openDocument(mgr, { file_path: inputPath });
+ assertSuccess(first, 'open first');
+ assertSuccess(second, 'open second');
+ return { mgr, inputPath, first };
+ });
+
+ await allureStep('When the first session is touched then read_file is called by file_path', async () => {
+ await getSessionStatus(mgr, { session_id: String(first.session_id) });
+ });
+
+ await allureStep('Then the most-recently-used session is reused', async () => {
+ const reused = await readFile(mgr, { file_path: inputPath, format: 'simple' });
+ assertSuccess(reused, 'read');
+ expect(reused.session_resolution).toBe('reused_existing_session');
+ expect(reused.resolved_session_id).toBe(first.session_id);
+ });
});
humanReadableTest.openspec('existing session reuse is non-blocking and warns via metadata')('Scenario: existing session reuse is non-blocking and warns via metadata', async () => {
- const mgr = createTestSessionManager();
- const inputPath = await createDoc(['Warning metadata text']);
-
- const opened = await openDocument(mgr, { file_path: inputPath });
- assertSuccess(opened, 'open');
- const sessionId = String(opened.session_id);
-
- const read = await readFile(mgr, { session_id: sessionId });
- assertSuccess(read, 'read');
- const paraId = firstParaIdFromToon(String(read.content));
-
- const edited = await replaceText(mgr, {
- session_id: sessionId,
- target_paragraph_id: paraId,
- old_string: 'Warning',
- new_string: 'WarningX',
- instruction: 'seed edit revision',
- });
- expect(edited.success).toBe(true);
-
- const reused = await grep(mgr, { file_path: inputPath, patterns: ['WarningX'] });
- assertSuccess(reused, 'grep');
- expect(reused.warning).toBeTypeOf('string');
- expect(reused.reused_existing_session).toBe(true);
- const context = reused.reused_session_context as Record;
- expect(typeof context.edit_revision).toBe('number');
- expect(typeof context.edit_count).toBe('number');
- expect(typeof context.created_at).toBe('string');
- expect(typeof context.last_used_at).toBe('string');
+ const { mgr, inputPath, sessionId } = await allureStep('Given a session with one edit applied', async () => {
+ const mgr = createTestSessionManager();
+ const inputPath = await createDoc(['Warning metadata text']);
+ const opened = await openDocument(mgr, { file_path: inputPath });
+ assertSuccess(opened, 'open');
+ const sessionId = String(opened.session_id);
+
+ const read = await readFile(mgr, { session_id: sessionId });
+ assertSuccess(read, 'read');
+ const paraId = firstParaIdFromToon(String(read.content));
+
+ const edited = await replaceText(mgr, {
+ session_id: sessionId,
+ target_paragraph_id: paraId,
+ old_string: 'Warning',
+ new_string: 'WarningX',
+ instruction: 'seed edit revision',
+ });
+ expect(edited.success).toBe(true);
+ return { mgr, inputPath, sessionId };
+ });
+
+ const reused = await allureStep('When grep is called via file_path (reusing the existing session)', async () => {
+ return await grep(mgr, { file_path: inputPath, patterns: ['WarningX'] });
+ });
+
+ await allureStep('Then response includes warning and reused session context metadata', async () => {
+ assertSuccess(reused, 'grep');
+ expect(reused.warning).toBeTypeOf('string');
+ expect(reused.reused_existing_session).toBe(true);
+ const context = reused.reused_session_context as Record;
+ expect(typeof context.edit_revision).toBe('number');
+ expect(typeof context.edit_count).toBe('number');
+ expect(typeof context.created_at).toBe('string');
+ expect(typeof context.last_used_at).toBe('string');
+ });
});
humanReadableTest.openspec('conflicting `session_id` and `file_path` is rejected')('Scenario: conflicting `session_id` and `file_path` is rejected', async () => {
- const mgr = createTestSessionManager();
- const pathA = await createDoc(['A']);
- const pathB = await createDoc(['B']);
- const opened = await openDocument(mgr, { file_path: pathA });
- assertSuccess(opened, 'open');
+ const { mgr, opened, pathB } = await allureStep('Given a session opened for file A and a different file B', async () => {
+ const mgr = createTestSessionManager();
+ const pathA = await createDoc(['A']);
+ const pathB = await createDoc(['B']);
+ const opened = await openDocument(mgr, { file_path: pathA });
+ assertSuccess(opened, 'open');
+ return { mgr, opened, pathB };
+ });
+
+ const read = await allureStep('When read_file is called with session_id from A and file_path of B', async () => {
+ return await readFile(mgr, {
+ session_id: String(opened.session_id),
+ file_path: pathB,
+ });
+ });
- const read = await readFile(mgr, {
- session_id: String(opened.session_id),
- file_path: pathB,
+ await allureStep('Then the call fails with SESSION_FILE_CONFLICT', async () => {
+ assertFailure(read, 'SESSION_FILE_CONFLICT', 'conflict');
});
- assertFailure(read, 'SESSION_FILE_CONFLICT', 'conflict');
});
humanReadableTest.openspec('quote-normalized fallback matches smart quotes and ASCII quotes')('Scenario: quote-normalized fallback matches smart quotes and ASCII quotes', async () => {
- const match = findUniqueSubstringMatch('\u201CCompany\u201D means ABC Corp.', '"Company" means ABC Corp.');
- expect(match.status).toBe('unique');
- if (match.status !== 'unique') return;
- expect(match.mode).toBe('quote_normalized');
+ const match = await allureStep('Given text with smart quotes and a search with ASCII quotes', async () => {
+ return findUniqueSubstringMatch('\u201CCompany\u201D means ABC Corp.', '"Company" means ABC Corp.');
+ });
+
+ await allureStep('Then the match is unique via quote_normalized mode', async () => {
+ expect(match.status).toBe('unique');
+ if (match.status !== 'unique') return;
+ expect(match.mode).toBe('quote_normalized');
+ });
});
humanReadableTest.openspec('flexible-whitespace fallback ignores spacing variance')('Scenario: flexible-whitespace fallback ignores spacing variance', async () => {
- const match = findUniqueSubstringMatch('The Purchase Price', 'The Purchase Price');
- expect(match.status).toBe('unique');
- if (match.status !== 'unique') return;
- expect(match.mode).toBe('flexible_whitespace');
+ const match = await allureStep('Given text with extra whitespace and a normalized search', async () => {
+ return findUniqueSubstringMatch('The Purchase Price', 'The Purchase Price');
+ });
+
+ await allureStep('Then the match is unique via flexible_whitespace mode', async () => {
+ expect(match.status).toBe('unique');
+ if (match.status !== 'unique') return;
+ expect(match.mode).toBe('flexible_whitespace');
+ });
});
humanReadableTest.openspec('quote-optional fallback matches quoted and unquoted term references')('Scenario: quote-optional fallback matches quoted and unquoted term references', async () => {
- const match = findUniqueSubstringMatch('The defined term is "Company".', 'defined term is Company.');
- expect(match.status).toBe('unique');
- if (match.status !== 'unique') return;
- expect(match.mode).toBe('quote_optional');
+ const match = await allureStep('Given text with quoted term and a search without quotes', async () => {
+ return findUniqueSubstringMatch('The defined term is "Company".', 'defined term is Company.');
+ });
+
+ await allureStep('Then the match is unique via quote_optional mode', async () => {
+ expect(match.status).toBe('unique');
+ if (match.status !== 'unique') return;
+ expect(match.mode).toBe('quote_optional');
+ });
});
humanReadableTest.openspec('quote-normalization scenarios are test-mapped in Allure coverage')('Scenario: quote-normalization scenarios are test-mapped in Allure coverage', async () => {
- expect(true).toBe(true);
+ await allureStep('Then coverage mapping is confirmed', async () => {
+ expect(true).toBe(true);
+ });
});
humanReadableTest.openspec('clear one session by id')('Scenario: clear one session by id', async () => {
- const mgr = createTestSessionManager();
- const inputPath = await createDoc(['Clear me']);
- const opened = await openDocument(mgr, { file_path: inputPath });
- assertSuccess(opened, 'open');
- const sessionId = String(opened.session_id);
+ const { mgr, sessionId } = await allureStep('Given an open session', async () => {
+ const mgr = createTestSessionManager();
+ const inputPath = await createDoc(['Clear me']);
+ const opened = await openDocument(mgr, { file_path: inputPath });
+ assertSuccess(opened, 'open');
+ return { mgr, sessionId: String(opened.session_id) };
+ });
- const cleared = await clearSession(mgr, { session_id: sessionId });
- expect(cleared.success).toBe(true);
+ await allureStep('When clear_session is called with that session_id', async () => {
+ const cleared = await clearSession(mgr, { session_id: sessionId });
+ expect(cleared.success).toBe(true);
+ });
- const status = await getSessionStatus(mgr, { session_id: sessionId });
- assertFailure(status, 'SESSION_NOT_FOUND', 'missing session');
+ await allureStep('Then get_session_status returns SESSION_NOT_FOUND', async () => {
+ const status = await getSessionStatus(mgr, { session_id: sessionId });
+ assertFailure(status, 'SESSION_NOT_FOUND', 'missing session');
+ });
});
humanReadableTest.openspec('clear sessions by file path clears all sessions for that file')('Scenario: clear sessions by file path clears all sessions for that file', async () => {
- const mgr = createTestSessionManager();
- const inputPath = await createDoc(['Clear by path']);
- const a = await openDocument(mgr, { file_path: inputPath });
- const b = await openDocument(mgr, { file_path: inputPath });
- assertSuccess(a, 'open a');
- assertSuccess(b, 'open b');
-
- const cleared = await clearSession(mgr, { file_path: inputPath });
- assertSuccess(cleared, 'clear');
- const clearedIds = (cleared.cleared_session_ids as string[]).sort();
- expect(clearedIds).toEqual([String(a.session_id), String(b.session_id)].sort());
+ const { mgr, inputPath, a, b } = await allureStep('Given two sessions opened for the same file', async () => {
+ const mgr = createTestSessionManager();
+ const inputPath = await createDoc(['Clear by path']);
+ const a = await openDocument(mgr, { file_path: inputPath });
+ const b = await openDocument(mgr, { file_path: inputPath });
+ assertSuccess(a, 'open a');
+ assertSuccess(b, 'open b');
+ return { mgr, inputPath, a, b };
+ });
+
+ const cleared = await allureStep('When clear_session is called with file_path', async () => {
+ return await clearSession(mgr, { file_path: inputPath });
+ });
+
+ await allureStep('Then both session ids are returned in cleared_session_ids', async () => {
+ assertSuccess(cleared, 'clear');
+ const clearedIds = (cleared.cleared_session_ids as string[]).sort();
+ expect(clearedIds).toEqual([String(a.session_id), String(b.session_id)].sort());
+ });
});
humanReadableTest.openspec('clear all sessions requires explicit confirmation')('Scenario: clear all sessions requires explicit confirmation', async () => {
- const mgr = createTestSessionManager();
- const clearAttempt = await clearSession(mgr, { clear_all: true });
- assertFailure(clearAttempt, 'CONFIRMATION_REQUIRED', 'confirmation');
+ const clearAttempt = await allureStep('When clear_session is called with clear_all but no confirmation', async () => {
+ const mgr = createTestSessionManager();
+ return await clearSession(mgr, { clear_all: true });
+ });
+
+ await allureStep('Then the call fails with CONFIRMATION_REQUIRED', async () => {
+ assertFailure(clearAttempt, 'CONFIRMATION_REQUIRED', 'confirmation');
+ });
});
humanReadableTest.openspec('open_document remains callable with deprecation warning')('Scenario: open_document remains callable with deprecation warning', async () => {
- const mgr = createTestSessionManager();
- const inputPath = await createDoc(['Deprecation warning']);
- const opened = await openDocument(mgr, { file_path: inputPath });
- assertSuccess(opened, 'open');
- expect((opened as Record).deprecation_warning).toBeUndefined();
+ const opened = await allureStep('When open_document is called', async () => {
+ const mgr = createTestSessionManager();
+ const inputPath = await createDoc(['Deprecation warning']);
+ return await openDocument(mgr, { file_path: inputPath });
+ });
+
+ await allureStep('Then the call succeeds and no deprecation_warning field is present', async () => {
+ assertSuccess(opened, 'open');
+ expect((opened as Record).deprecation_warning).toBeUndefined();
+ });
});
});