diff --git a/CHANGELOG.md b/CHANGELOG.md
index 133c40d1..e895d343 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,11 +7,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+### Fixed
+
+- **`applySyncResponse` frontier source** — now uses `_lastFrontier` (SHA map) instead of `observedFrontier` (VersionVector) as the 3rd arg to `applySyncResponseImpl`, eliminating a double-cast bug (B56).
+- **`syncWith` infinite delegation guard** — `syncWith` now calls `this.createSyncRequest()` / `this.applySyncResponse()` directly instead of `this._host.*`, preventing infinite delegation when the host delegates back to the controller.
+- **`BitmapIndexReader` strict default** — changed from `false` to `true`; shard OID validation errors now throw `ShardCorruptionError` by default instead of silently skipping. All existing callers already pass `strict: true` explicitly.
+- **`mockServerGraph` asymmetry** — `syncAuth` test helper now mocks on `_syncController.processSyncRequest` (matching `mockClientGraph` pattern) instead of shadowing the prototype with an own-property mock.
+- **Stale comment** — `BitmapIndexReader.test.js` comment corrected from "default" to "explicit override" for `strict: false` reader.
+- **OID length standardization** — all 25 short 8-char OIDs in `BitmapIndexReader.test.js` extended to 40-char zero-padded hex, matching real Git SHA-1 length and eliminating non-hex characters (`eeffgghh` → `eeff00dd…`).
+
+### Added
+
+- **Frontier fix verification test** (`SyncController.test.js`) — confirms `applySyncResponse` passes `_lastFrontier` (SHA map), not `observedFrontier` (VersionVector), as 3rd arg.
+- **Null-context guard tests** (`JoinReducer.test.js`) — 2 tests verifying `applyFast` handles `undefined` and `null` context gracefully via the `|| {}` fallback.
+- **Auto-materialize path tests** (`SyncController.test.js`) — 2 tests for `syncWith`: calls `materialize()` when `_cachedState` is null; returns `state` when `materialize: true`.
+- **HTTP sync path tests** (`SyncController.test.js`) — 9 tests covering success, 5xx/4xx status codes, invalid JSON, AbortError, TimeoutError, network error, `shouldRetry` predicate, and auth header forwarding.
+- **`serve()` deeper tests** (`SyncController.test.js`) — 3 tests verifying `HttpSyncServer` constructor args, auth config enhancement (crypto + logger injection), and graph host passthrough.
+
+## [11.5.1] — 2026-02-22 — M9 PARTITION: Architectural Decomposition
+
+Breaks apart structural DRY violations and extracts encapsulated services
+from the WarpGraph god class, without changing any public API surface.
+
### Added
-- **Publication-quality SVG diagrams** — 8 Graphviz diagrams in `docs/diagrams/` covering the empty tree trick, two-plane state model, ref layout, patch anatomy, multi-writer convergence, materialization pipeline, checkpoint tree, and hexagonal architecture. Grayscale, transparent-background, serif-font styling matching the AION paper aesthetic.
+- **Publication-quality SVG diagrams** — 8 Graphviz diagrams in `docs/diagrams/` covering data storage, two-plane state model, ref layout, patch anatomy, multi-writer convergence, materialization pipeline, checkpoint tree, and hexagonal architecture. Grayscale, transparent-background, serif-font styling matching the AION paper aesthetic.
- **`scripts/build-diagrams.sh`** — compiles all `.dot` files to SVG with transparent-background post-processing.
+- **`SyncController`** (`src/domain/services/SyncController.js`) — new class encapsulating all 9 sync methods (`getFrontier`, `hasFrontierChanged`, `status`, `createSyncRequest`, `processSyncRequest`, `applySyncResponse`, `syncNeeded`, `syncWith`, `serve`) and 2 private helpers. Independently unit-testable with a mock host object. 16 new tests.
+- **`applyFast()` / `applyWithReceipt()`** — named exported functions in `JoinReducer.js` replacing the duplicated fast/receipt code paths. `join()` is now a 3-line dispatcher. `reduceV5()` calls named functions directly. 4 new tests.
+- **`isValidOid()`** (`src/domain/utils/validateShardOid.js`) — domain-local hex OID validator (4–64 chars). `BitmapIndexReader.setup()` validates each shard OID: strict mode throws `ShardCorruptionError`, non-strict skips with warning. 13 new tests.
+
+### Changed
+
+- **`sync.methods.js`** — deleted entirely; sync methods now wired via `defineProperty` delegation to `_syncController`.
+- **`WarpGraph.js`** — added `_syncController` field instantiation in constructor (+4 LOC, now 422 LOC total — well under the 500 LOC M9 gate).
+- **`JoinReducer.join()`** — refactored from inline dual-path to dispatcher over `applyFast` / `applyWithReceipt`. Shared frontier update logic extracted into `updateFrontierFromPatch()` helper.
+
## [11.5.0] — 2026-02-20 — Content Attachment (Paper I `Atom(p)`)
Implements content attachment — the ability to attach content-addressed blobs
@@ -153,8 +185,6 @@ to prevent regressions.
- **WarpPersistence types** — Added `IndexStorage` typedef (`BlobPort & TreePort & RefPort`).
- **Policy checker upgrade** — `ts-policy-check.js` now enforces 4 rules: (1) ban `@ts-ignore`, (2) ban `@type {*}`/`@type {any}`, (3) ban embedded wildcards in JSDoc generics, (4) ban `z.any()`.
-## [Unreleased]
-
## [11.3.0] — 2026-02-17 — DX-HAMMER: Read-Path CLI Improvements
New CLI commands and improved output for graph inspection and debugging.
diff --git a/README.md b/README.md
index 227e2569..d2bf83b6 100644
--- a/README.md
+++ b/README.md
@@ -55,6 +55,10 @@ const result = await graph.query()
## How It Works
+
+
+
+
### The Multi-Writer Problem (and How It's Solved)
Multiple people (or machines, or processes) can write to the same graph **simultaneously, without any coordination**. There's no central server, no locking, no "wait your turn."
@@ -73,6 +77,10 @@ Every operation gets a unique **EventId** — `(lamport, writerId, patchSha, opI
## Multi-Writer Collaboration
+
+
+
+
Writers operate independently on the same Git repository. Sync happens through standard Git transport (push/pull) or the built-in HTTP sync protocol.
```javascript
@@ -457,6 +465,10 @@ When a seek cursor is active, `query`, `info`, `materialize`, and `history` auto
## Architecture
+
+
+
+
The codebase follows hexagonal architecture with ports and adapters:
**Ports** define abstract interfaces for infrastructure:
diff --git a/ROADMAP.md b/ROADMAP.md
index 4eaa733b..03c014a4 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -619,7 +619,7 @@ No v2.0 tag until **every** gate passes. If any RG fails: no tag. Period.
### M9.T1 — SyncController Extraction
-- **Status:** `PENDING`
+- **Status:** `DONE`
**Items:**
@@ -627,7 +627,7 @@ No v2.0 tag until **every** gate passes. If any RG fails: no tag. Period.
### M9.T2 — JoinReducer Dual-Path Refactor
-- **Status:** `PENDING`
+- **Status:** `DONE`
**Items:**
@@ -635,7 +635,7 @@ No v2.0 tag until **every** gate passes. If any RG fails: no tag. Period.
### M9.T3 — Bitmap OID Validation (Opportunistic)
-- **Status:** `PENDING`
+- **Status:** `DONE`
**Items:**
@@ -745,8 +745,13 @@ Items that can be picked up opportunistically without blocking anything. No mile
| B53 | **FIX JSR PUBLISH DRY-RUN DENO PANIC** — Deno 2.6.7 `deno_ast` panics on overlapping text changes from duplicate `roaring` import rewrites; either pin Deno version, vendor the import, or file upstream issue and add workaround |
| B54 | **`typedCustom()` ZOD HELPER** — `z.custom()` without a generic yields `unknown` in JS; a JSDoc-friendly wrapper (or `@typedef`-based pattern) would eliminate verbose `/** @type {z.ZodType} */ (z.custom(...))` casts across HttpSyncServer and future Zod schemas |
| B55 | **UPGRADE `HttpServerPort` REQUEST/RESPONSE TYPES** — `createServer` callback uses `Object` for `headers` and `string|Buffer` for response body; tighten to `Record` and extract shared request/response typedefs to avoid repeated inline casts in HttpSyncServer, NodeHttpAdapter, BunHttpAdapter, DenoHttpAdapter |
-| B56 | **INVESTIGATE `observedFrontier` / FRONTIER SEMANTIC MISMATCH** — `sync.methods.js` line 261 double-casts `observedFrontier` (a version vector `Map`) to `Map` (writer frontier) before passing to `applySyncResponseImpl`; determine whether this is a latent correctness bug or an intentional coercion, and fix or document accordingly |
+| ~~B56~~ | ~~**INVESTIGATE `observedFrontier` / FRONTIER SEMANTIC MISMATCH**~~ — DONE. Was a latent bug: `applySyncResponse()` passed `observedFrontier` (a version-vector `Map` of writer → Lamport counter) instead of `_lastFrontier` (a `Map` of writer → patch SHA). Fixed to use `_lastFrontier` with `createFrontier()` fallback. |
| B57 | **CI: AUTO-VALIDATE `type-surface.m8.json` AGAINST `index.d.ts`** — add a CI gate or pre-push check that parses the manifest and confirms every declared method/property/return type matches the corresponding signature in `index.d.ts`; prevents drift like the missing `setSeekCache` and `syncWith.state` return found in review |
+| ~~B58~~ | ~~**DEAD SYNC CONSTANTS IN `_internal.js`**~~ — DONE. Deleted orphaned sync constants and re-exports from `_internal.js`. |
+| ~~B59~~ | ~~**ELIMINATE `wireWarpMethods` INDIRECTION FOR SYNC**~~ — DONE. Deleted `sync.methods.js`; 9 sync methods wired directly onto WarpGraph.prototype via `defineProperty` loop delegating to `_syncController`. |
+| ~~B60~~ | ~~**TEST HELPER: `mockOid(tag)`**~~ — DONE then REVERTED. Created speculatively but never adopted; deleted as dead code. |
+| ~~B61~~ | ~~**REMOVE DOUBLED `/** @type {any} */` ANNOTATIONS IN TESTS**~~ — DONE. Removed 66 doubled annotations across 13 test files. |
+| ~~B62~~ | ~~**DEFINE `SyncHost` INTERFACE FOR SYNCCONTROLLER**~~ — DONE. Added `@typedef SyncHost` to `SyncController.js` documenting all 12 host fields + 3 methods. |
### Conformance Property Pack (B19 + B22)
diff --git a/docs/GUIDE.md b/docs/GUIDE.md
index 1015a4e0..9dd623e1 100644
--- a/docs/GUIDE.md
+++ b/docs/GUIDE.md
@@ -87,6 +87,10 @@ const exists = await graph.hasNode('todo:1');
That's it. Your graph data is stored as Git commits — invisible to normal Git workflows but inheriting all of Git's properties.
+
+
+
+
---
## Writing Data
@@ -207,6 +211,10 @@ Before reading, you need to **materialize** — this replays all patches from al
### Materialization
+
+
+
+
```javascript
const state = await graph.materialize();
```
@@ -464,6 +472,10 @@ WarpGraph's core strength is coordination-free multi-writer collaboration. Each
### How It Works
+
+
+
+
```javascript
// === Machine A ===
const graphA = await WarpGraph.open({
@@ -497,6 +509,10 @@ const stateB = await graphB.materialize();
### Conflict Resolution
+
+
+
+
When two writers modify the same property concurrently, the conflict is resolved deterministically using **Last-Writer-Wins (LWW)** semantics. The winner is the operation with the higher priority, compared in this order:
1. Higher Lamport timestamp wins
@@ -572,6 +588,10 @@ if (changed) {
### Checkpoints
+
+
+
+
A **checkpoint** is a snapshot of materialized state at a known point in history. Without checkpoints, materialization replays every patch from every writer. With a checkpoint, it loads the snapshot and only replays patches since then.
```javascript
@@ -1348,6 +1368,10 @@ Each patch carries its version vector as causal context. This allows the reducer
### Appendix B: Git Ref Layout
+
+
+
+
```text
refs/warp//
├── writers/
@@ -1364,6 +1388,10 @@ Each writer's ref points to the tip of their patch chain. Patches are Git commit
### Appendix C: Patch Format
+
+
+
+
Each patch is a Git commit containing:
- **CBOR-encoded operations** in a blob referenced from the commit message
diff --git a/docs/diagrams/fig-architecture.dot b/docs/diagrams/fig-architecture.dot
index 90f0fd19..ffda688f 100644
--- a/docs/diagrams/fig-architecture.dot
+++ b/docs/diagrams/fig-architecture.dot
@@ -93,14 +93,16 @@ digraph architecture {
}
}
- // === Inward-pointing arrows (dependency rule) ===
- // Adapters implement Ports
- git_adapter -> persistence_port [dir=back, style=dashed, penwidth=0.5]
- cbor_codec -> codec_port [dir=back, style=dashed, penwidth=0.5]
- web_crypto -> crypto_port [dir=back, style=dashed, penwidth=0.5]
- clock_adapter -> clock_port [dir=back, style=dashed, penwidth=0.5]
- console_log -> logger_port [dir=back, style=dashed, penwidth=0.5]
- cas_cache -> cache_port [dir=back, style=dashed, penwidth=0.5]
+ // === Outward-pointing arrows (adapter implements port) ===
+ persistence_port -> git_adapter [style=dashed, penwidth=0.5, label=]
+ codec_port -> cbor_codec [style=dashed, penwidth=0.5, label=]
+ crypto_port -> web_crypto [style=dashed, penwidth=0.5, label=]
+ clock_port -> clock_adapter [style=dashed, penwidth=0.5, label=]
+ logger_port -> console_log [style=dashed, penwidth=0.5, label=]
+ cache_port -> cas_cache [style=dashed, penwidth=0.5, label=]
+
+ // IndexStoragePort is implemented via GitGraphAdapter (shared adapter)
+ index_port -> git_adapter [style=dashed, penwidth=0.5, label=]
// Domain uses Ports
warp_graph -> persistence_port [penwidth=0.5]
@@ -108,6 +110,8 @@ digraph architecture {
warp_graph -> crypto_port [penwidth=0.5]
warp_graph -> clock_port [penwidth=0.5]
warp_graph -> logger_port [penwidth=0.5]
+ warp_graph -> index_port [penwidth=0.5]
+ warp_graph -> cache_port [penwidth=0.5]
// Internal domain relationships
warp_graph -> reducer [penwidth=0.5, style=invis]
diff --git a/docs/diagrams/fig-architecture.svg b/docs/diagrams/fig-architecture.svg
index 44139a14..7c5102a5 100644
--- a/docs/diagrams/fig-architecture.svg
+++ b/docs/diagrams/fig-architecture.svg
@@ -4,221 +4,246 @@
-
-
+
+
architecture
-Fig. 8. Hexagonal Architecture — dependency rule: arrows point inward only
+Fig. 8. Hexagonal Architecture — dependency rule: arrows point inward only
cluster_adapters
-
-Adapters
- (infrastructure implementations)
+
+Adapters
+ (infrastructure implementations)
cluster_ports
-
-Ports
- (abstract interfaces)
+
+Ports
+ (abstract interfaces)
cluster_domain
-
-Domain Core
+
+Domain Core
git_adapter
-
-GitGraphAdapter
-@git-stunts/plumbing
+
+GitGraphAdapter
+@git-stunts/plumbing
+
+
+
+cbor_codec
+
+CborCodec
+cbor-x
+
+
+
+web_crypto
+
+WebCryptoAdapter
+globalThis.crypto
+
+
+
+clock_adapter
+
+ClockAdapter
+globalThis.performance
+
+
+
+console_log
+
+ConsoleLogger
+
+
+
+cas_cache
+
+CasSeekCacheAdapter
persistence_port
-
-GraphPersistencePort
+
+GraphPersistencePort
-
+
-git_adapter->persistence_port
-
-
-
-
-
-cbor_codec
-
-CborCodec
-cbor-x
+persistence_port->git_adapter
+
+
+implements
codec_port
-
-CodecPort
+
+CodecPort
-
+
-cbor_codec->codec_port
-
-
-
-
-
-web_crypto
-
-WebCryptoAdapter
-globalThis.crypto
+codec_port->cbor_codec
+
+
+implements
crypto_port
-
-CryptoPort
+
+CryptoPort
-
+
-web_crypto->crypto_port
-
-
-
-
-
-clock_adapter
-
-ClockAdapter
-globalThis.performance
+crypto_port->web_crypto
+
+
+implements
clock_port
-
-ClockPort
+
+ClockPort
-
+
-clock_adapter->clock_port
-
-
-
-
-
-console_log
-
-ConsoleLogger
+clock_port->clock_adapter
+
+
+implements
logger_port
-
-LoggerPort
+
+LoggerPort
-
+
-console_log->logger_port
-
-
+logger_port->console_log
+
+
+implements
-
-
-cas_cache
-
-CasSeekCacheAdapter
+
+
+index_port
+
+IndexStoragePort
+
+
+
+index_port->git_adapter
+
+
+via
cache_port
-
-SeekCachePort
+
+SeekCachePort
-
+
-cas_cache->cache_port
-
-
-
-
-
-index_port
-
-IndexStoragePort
+cache_port->cas_cache
+
+
+implements
warp_graph
-
-WarpGraph
-main API facade
+
+WarpGraph
+main API facade
-
+
warp_graph->persistence_port
-
-
+
+
-
+
warp_graph->codec_port
-
-
+
+
-
+
warp_graph->crypto_port
-
-
+
+
-
+
warp_graph->clock_port
-
-
+
+
-
+
warp_graph->logger_port
-
-
+
+
+
+
+
+warp_graph->index_port
+
+
+
+
+
+warp_graph->cache_port
+
+
reducer
-
-JoinReducer
+
+JoinReducer
patch_builder
-
-PatchBuilderV2
+
+PatchBuilderV2
checkpoint_svc
-
-CheckpointService
+
+CheckpointService
query_builder
-
-QueryBuilder
+
+QueryBuilder
traversal
-
-LogicalTraversal
+
+LogicalTraversal
crdt
-
-CRDTs
-VersionVector, ORSet, LWW
+
+CRDTs
+VersionVector, ORSet, LWW
diff --git a/docs/diagrams/fig-checkpoint-tree.dot b/docs/diagrams/fig-checkpoint-tree.dot
index d6e6223c..24284b6c 100644
--- a/docs/diagrams/fig-checkpoint-tree.dot
+++ b/docs/diagrams/fig-checkpoint-tree.dot
@@ -21,11 +21,12 @@ digraph checkpoint_tree {
label= checkpoints/head>]
// Checkpoint commit
- commit [shape=box, style="rounded,filled", fillcolor="#e8e8e8",
- label=<{Checkpoint Commit 7c8d9e0... |{
- eg-kind: checkpoint
- eg-checkpoint: 1
- }}>]
+ commit [shape=none, margin=0,
+ label=<
+ Checkpoint Commit 7c8d9e0...
+ eg-kind: checkpoint
+ eg-checkpoint: 1
+
>]
ckpt_ref -> commit [style=dashed, penwidth=0.5]
diff --git a/docs/diagrams/fig-checkpoint-tree.svg b/docs/diagrams/fig-checkpoint-tree.svg
index 5a2ccdf4..69389293 100644
--- a/docs/diagrams/fig-checkpoint-tree.svg
+++ b/docs/diagrams/fig-checkpoint-tree.svg
@@ -12,28 +12,27 @@
ckpt_ref
-
+
refs/warp/<graph>/
checkpoints/head
commit
-
-{
-Checkpoint Commit
-7c8d9e0...
-|{
-eg-kind: checkpoint
-
-eg-checkpoint: 1
- }}
+
+
+Checkpoint Commit
+7c8d9e0...
+
+eg-kind: checkpoint
+
+eg-checkpoint: 1
ckpt_ref->commit
-
-
+
+
@@ -65,7 +64,7 @@
visible_cbor
-
+
visible.cbor
cache only
@@ -78,7 +77,7 @@
frontier_cbor
-
+
frontier.cbor
@@ -90,7 +89,7 @@
applied_vv
-
+
appliedVV.cbor
@@ -102,7 +101,7 @@
provenance
-
+
provenanceIndex.cbor
optional
diff --git a/docs/diagrams/fig-empty-tree.dot b/docs/diagrams/fig-data-storage.dot
similarity index 79%
rename from docs/diagrams/fig-empty-tree.dot
rename to docs/diagrams/fig-data-storage.dot
index 26451542..c48321b8 100644
--- a/docs/diagrams/fig-empty-tree.dot
+++ b/docs/diagrams/fig-data-storage.dot
@@ -1,14 +1,14 @@
-// Fig 1. The Empty Tree Trick
+// Fig 1. WARP Data Storage
// Style: Git object vocabulary (i)
-// Shows: Normal Git commits vs WARP patch commits, both referencing the empty tree
+// Shows: Normal Git commits vs WARP patch commits — invisible to normal Git workflows
-digraph empty_tree {
+digraph data_storage {
bgcolor=transparent
rankdir=TB
newrank=true
fontname="Times-Roman"
fontsize=11
- label=<Fig. 1. The Empty Tree Trick — WARP patches live alongside normal Git objects >
+ label=<Fig. 1. WARP Data Storage — invisible to normal Git workflows >
labelloc=b
labeljust=c
@@ -23,7 +23,7 @@ digraph empty_tree {
color="#606060"
penwidth=0.8
fontname="Times-Roman"
- fontsize=10
+ fontsize=9
HEAD [shape=box, style="dashed,filled", fillcolor=white, label="HEAD"]
main_ref [shape=box, style="dashed,filled", fillcolor=white, label="refs/heads/main"]
@@ -57,7 +57,7 @@ digraph empty_tree {
color="#606060"
penwidth=0.8
fontname="Times-Roman"
- fontsize=10
+ fontsize=9
warp_ref [shape=box, style="dashed,filled", fillcolor=white,
label= writers/alice>]
@@ -84,20 +84,19 @@ digraph empty_tree {
WT3 -> content_blob [penwidth=0.5]
}
- // --- Empty tree ---
- empty_tree [shape=folder, style="filled,bold", fillcolor="#d0d0d0", penwidth=1.2,
- label=4b825dc... >]
-
- P1 -> empty_tree [label="tree", penwidth=0.5, style=dashed, color="#606060"]
-
// --- Invisible-to-git-log annotation ---
- invisible_note [shape=plaintext, fontsize=9, fontname="Times-Roman",
- label=<⬆ visible to git log ⬇ invisible to git log >]
+ // WARP data lives under refs/warp/ — invisible to git log, git diff, git status
+ invisible_note [shape=none, margin=0, fontsize=9, fontname="Times-Roman",
+ label=<
+ visible to git log
+
+ invisible to git log
+ (lives under refs/warp/ )
+
>]
// Ranking
{rank=same; HEAD; warp_ref}
{rank=same; C3; P3}
{rank=same; C1; P1}
{rank=same; T3; WT3}
- {rank=same; empty_tree; invisible_note}
}
diff --git a/docs/diagrams/fig-data-storage.svg b/docs/diagrams/fig-data-storage.svg
new file mode 100644
index 00000000..22fe882b
--- /dev/null
+++ b/docs/diagrams/fig-data-storage.svg
@@ -0,0 +1,219 @@
+
+
+
+
+
+
+data_storage
+Fig. 1. WARP Data Storage — invisible to normal Git workflows
+
+cluster_normal
+
+Normal Git Objects
+
+
+cluster_warp
+
+WARP Patch Objects
+
+
+
+HEAD
+
+HEAD
+
+
+
+main_ref
+
+refs/heads/main
+
+
+
+HEAD->main_ref
+
+
+
+
+
+C3
+
+C3
+a1b2c3d
+
+
+
+main_ref->C3
+
+
+
+
+
+C2
+
+C2
+e4f5a6b
+
+
+
+C3->C2
+
+
+parent
+
+
+
+T3
+
+tree
+
+
+
+C3->T3
+
+
+tree
+
+
+
+C1
+
+C1
+7c8d9e0
+
+
+
+C2->C1
+
+
+parent
+
+
+
+src_blob
+
+src/index.js
+
+
+
+T3->src_blob
+
+
+
+
+
+pkg_blob
+
+package.json
+
+
+
+T3->pkg_blob
+
+
+
+
+
+warp_ref
+
+refs/warp/myGraph/
+writers/alice
+
+
+
+P3
+
+P3
+lamport=3
+f1a2b3c
+
+
+
+warp_ref->P3
+
+
+
+
+
+P2
+
+P2
+lamport=2
+d4e5f6a
+
+
+
+P3->P2
+
+
+parent
+
+
+
+WT3
+
+tree
+
+
+
+P3->WT3
+
+
+tree
+
+
+
+P1
+
+P1
+lamport=1
+b7c8d9e
+
+
+
+P2->P1
+
+
+parent
+
+
+
+patch_blob
+
+patch.cbor
+
+
+
+WT3->patch_blob
+
+
+
+
+
+content_blob
+
+_content_*
+
+
+
+WT3->content_blob
+
+
+
+
+
+invisible_note
+visible to
+git log
+invisible to
+git log
+ (lives under
+refs/warp/
+)
+
+
+
+
diff --git a/docs/diagrams/fig-empty-tree.svg b/docs/diagrams/fig-empty-tree.svg
deleted file mode 100644
index be50db66..00000000
--- a/docs/diagrams/fig-empty-tree.svg
+++ /dev/null
@@ -1,229 +0,0 @@
-
-
-
-
-
-
-empty_tree
-Fig. 1. The Empty Tree Trick — WARP patches live alongside normal Git objects
-
-cluster_normal
-
-Normal Git Objects
-
-
-cluster_warp
-
-WARP Patch Objects
-
-
-
-HEAD
-
-HEAD
-
-
-
-main_ref
-
-refs/heads/main
-
-
-
-HEAD->main_ref
-
-
-
-
-
-C3
-
-C3
-a1b2c3d
-
-
-
-main_ref->C3
-
-
-
-
-
-C2
-
-C2
-e4f5a6b
-
-
-
-C3->C2
-
-
-parent
-
-
-
-T3
-
-tree
-
-
-
-C3->T3
-
-
-tree
-
-
-
-C1
-
-C1
-7c8d9e0
-
-
-
-C2->C1
-
-
-parent
-
-
-
-src_blob
-
-src/index.js
-
-
-
-T3->src_blob
-
-
-
-
-
-pkg_blob
-
-package.json
-
-
-
-T3->pkg_blob
-
-
-
-
-
-warp_ref
-
-refs/warp/myGraph/
-writers/alice
-
-
-
-P3
-
-P3
-lamport=3
-f1a2b3c
-
-
-
-warp_ref->P3
-
-
-
-
-
-P2
-
-P2
-lamport=2
-d4e5f6a
-
-
-
-P3->P2
-
-
-parent
-
-
-
-WT3
-
-tree
-
-
-
-P3->WT3
-
-
-tree
-
-
-
-P1
-
-P1
-lamport=1
-b7c8d9e
-
-
-
-P2->P1
-
-
-parent
-
-
-
-empty_tree
-
-Empty Tree
-4b825dc...
-
-
-
-P1->empty_tree
-
-
-tree
-
-
-
-patch_blob
-
-patch.cbor
-
-
-
-WT3->patch_blob
-
-
-
-
-
-content_blob
-
-_content_*
-
-
-
-WT3->content_blob
-
-
-
-
-
-invisible_note
-⬆ visible to
-git log
-⬇ invisible to
-git log
-
-
-
diff --git a/docs/diagrams/fig-materialize-pipeline.dot b/docs/diagrams/fig-materialize-pipeline.dot
index 2aaab812..f4dbd8a6 100644
--- a/docs/diagrams/fig-materialize-pipeline.dot
+++ b/docs/diagrams/fig-materialize-pipeline.dot
@@ -1,10 +1,10 @@
// Fig 6. Materialization Pipeline
// Style: Flow diagram (architectural)
-// Shows: Left-to-right pipeline from writer refs to materialized state
+// Shows: Top-to-bottom pipeline from writer refs to materialized state
digraph materialize_pipeline {
bgcolor=transparent
- rankdir=LR
+ rankdir=TB
fontname="Times-Roman"
fontsize=11
label=<Fig. 6. Materialization Pipeline — from refs to consistent state >
@@ -24,18 +24,22 @@ digraph materialize_pipeline {
sort [label= Lamport>]
// JoinReducer (larger, emphasized)
- reducer [label=<{JoinReducer |{
- OR-Set merge(nodes, edges) |
- LWW merge(properties)
- }}>, fillcolor="#d0d0d0", penwidth=1.2]
+ reducer [shape=none, margin=0,
+ label=<
+ JoinReducer
+ OR-Set merge(nodes, edges)
+ LWW merge(properties)
+
>, penwidth=1.2]
// Output
- state [label=<{WarpStateV5 |{
- nodeAlive |
- edgeAlive |
- prop |
- frontier
- }}>, fillcolor="#e8e8e8", penwidth=1.0]
+ state [shape=none, margin=0,
+ label=<
+ WarpStateV5
+ nodeAlive
+ edgeAlive
+ prop
+ frontier
+
>, penwidth=1.0]
// Main pipeline flow
refs -> walk -> decode -> sort -> reducer -> state
diff --git a/docs/diagrams/fig-materialize-pipeline.svg b/docs/diagrams/fig-materialize-pipeline.svg
index 76c4e535..22c2ef2f 100644
--- a/docs/diagrams/fig-materialize-pipeline.svg
+++ b/docs/diagrams/fig-materialize-pipeline.svg
@@ -4,122 +4,124 @@
-
-
+
+
materialize_pipeline
-Fig. 6. Materialization Pipeline — from refs to consistent state
+Fig. 6. Materialization Pipeline — from refs to consistent state
refs
-
-Writer
-Refs
+
+Writer
+Refs
walk
-
-Walk
-Commit DAGs
+
+Walk
+Commit DAGs
refs->walk
-
-
+
+
checkpoint
-
-Checkpoint
-(if available)
+
+Checkpoint
+(if available)
refs->checkpoint
-
-
-shortcut
+
+
+shortcut
decode
-
-Decode
-CBOR
+
+Decode
+CBOR
walk->decode
-
-
+
+
sort
-
-Sort by
-Lamport
+
+Sort by
+Lamport
decode->sort
-
-
+
+
reducer
-
-{
-JoinReducer
-|{ OR-Set merge
-(nodes, edges)
- | LWW merge
-(properties)
- }}
+
+
+
+JoinReducer
+
+OR-Set merge
+(nodes, edges)
+
+LWW merge
+(properties)
sort->reducer
-
-
+
+
state
-
-{
-WarpStateV5
-|{
-nodeAlive
- |
-edgeAlive
- |
-prop
- |
-frontier
- }}
+
+
+
+WarpStateV5
+
+nodeAlive
+
+edgeAlive
+
+prop
+
+frontier
reducer->state
-
-
+
+
checkpoint->reducer
-
-
+
+
complexity
-
-O(P) — P = total patches across all writers
+
+O(P) — P = total patches across all writers
diff --git a/docs/diagrams/fig-multi-writer.dot b/docs/diagrams/fig-multi-writer.dot
index cb56374c..f1df9245 100644
--- a/docs/diagrams/fig-multi-writer.dot
+++ b/docs/diagrams/fig-multi-writer.dot
@@ -4,7 +4,8 @@
digraph multi_writer {
bgcolor=transparent
- rankdir=LR
+ rankdir=TB
+ newrank=true
fontname="Times-Roman"
fontsize=11
label=<Fig. 5. Multi-Writer Convergence — independent chains, deterministic merge >
@@ -55,20 +56,21 @@ digraph multi_writer {
}
// === JoinReducer ===
- reducer [shape=box, style="rounded,filled", fillcolor="#d0d0d0", penwidth=1.2,
- label=<{JoinReducer |{
- OR-Set merge (skeleton) |
- LWW merge (attachments)
- }}>]
+ reducer [shape=none, margin=0, penwidth=1.2,
+ label=<
+ JoinReducer
+ OR-Set merge (skeleton) LWW merge (attachments)
+
>]
// === Materialized State ===
- state [shape=box, style="rounded,filled", fillcolor="#f0f0f0", penwidth=1.0,
- label=<{WarpStateV5 |{
- nodeAlive: ORSet |
- edgeAlive: ORSet |
- prop: Map<LWW> |
- observedFrontier: VV
- }}>]
+ state [shape=none, margin=0, penwidth=1.0,
+ label=<
+ WarpStateV5
+ nodeAlive: ORSet
+ edgeAlive: ORSet
+ prop: Map<LWW>
+ observedFrontier: VV
+
>]
// === Sort step ===
sort [shape=box, style="rounded,filled", fillcolor=white,
@@ -86,4 +88,5 @@ digraph multi_writer {
{rank=same; Pa1; Pb1}
{rank=same; Pa3; Pb2}
+ {rank=same; sort; no_coord}
}
diff --git a/docs/diagrams/fig-multi-writer.svg b/docs/diagrams/fig-multi-writer.svg
index 031e76a6..ff5c1edf 100644
--- a/docs/diagrams/fig-multi-writer.svg
+++ b/docs/diagrams/fig-multi-writer.svg
@@ -4,136 +4,143 @@
-
-
+
+
multi_writer
-Fig. 5. Multi-Writer Convergence — independent chains, deterministic merge
+Fig. 5. Multi-Writer Convergence — independent chains, deterministic merge
cluster_alice
-
-Alice
+
+Alice
+
+
+cluster_bob
+
+Bob
Pa1
-
-P
-a1
-L=1
+
+P
+a1
+L=1
Pa2
-
-P
-a2
-L=2
+
+P
+a2
+L=2
Pa1->Pa2
-
-
+
+
Pa3
-
-P
-a3
-L=4
+
+P
+a3
+L=4
Pa2->Pa3
-
-
+
+
sort
-
-Sort by
-Lamport
+
+Sort by
+Lamport
Pa3->sort
-
-
+
+
Pb1
-
-P
-b1
-L=1
+
+P
+b1
+L=1
Pb2
-
-P
-b2
-L=3
+
+P
+b2
+L=3
Pb1->Pb2
-
-
+
+
Pb2->sort
-
-
+
+
reducer
-
-{
-JoinReducer
-|{ OR-Set merge
-(skeleton) | LWW merge
-(attachments) }}
+
+
+JoinReducer
+
+OR-Set merge
+(skeleton)
+
+LWW merge
+(attachments)
state
-
-{
-WarpStateV5
-|{
-nodeAlive: ORSet
- |
-edgeAlive: ORSet
- |
-prop: Map<LWW>
- |
-observedFrontier: VV
- }}
+
+
+WarpStateV5
+
+nodeAlive: ORSet
+
+edgeAlive: ORSet
+
+prop: Map<LWW>
+
+observedFrontier: VV
reducer->state
-
-
+
+
sort->reducer
-
-
+
+
no_coord
-no coordination
-required between writers
+no coordination
+required between writers
diff --git a/docs/diagrams/fig-patch-anatomy.dot b/docs/diagrams/fig-patch-anatomy.dot
index 97216296..6de022a2 100644
--- a/docs/diagrams/fig-patch-anatomy.dot
+++ b/docs/diagrams/fig-patch-anatomy.dot
@@ -16,21 +16,23 @@ digraph patch_anatomy {
node [fontname="Times-Roman", fontsize=10, penwidth=0.8]
edge [fontname="Times-Roman", fontsize=8, penwidth=0.8]
- // The patch commit (record shape)
- commit [shape=record, style="rounded,filled", fillcolor="#e8e8e8",
- label=<{Patch Commit f1a2b3c4d5e6f7a |{
- tree: a9b8c7d... |
- parent: d4e5f6a...
- }|{Trailers:
- eg-kind: patch
- eg-graph: myGraph
- eg-writer: alice
- eg-lamport: 42
- eg-schema: 2
- eg-patch-oid: b3c4d5e...
- eg-state-hash: e6f7a8b...
- eg-frontier-oid: c7d8e9f...
- }}>]
+ // The patch commit (TABLE layout)
+ commit [shape=none, margin=0,
+ label=<
+ Patch Commit f1a2b3c
+ tree: a9b8c7d...
+ parent: d4e5f6a...
+ Trailers:
+ eg-kind: patch
+ eg-graph: myGraph
+ eg-writer: alice
+ eg-lamport: 42
+ eg-schema: 2
+ eg-patch-oid: b3c4d5e...
+ eg-state-hash: e6f7a8b...
+ eg-frontier-oid: c7d8e9f...
+
+
>]
// Parent commit
parent [shape=box, style="rounded,filled", fillcolor="#e8e8e8",
@@ -47,15 +49,16 @@ digraph patch_anatomy {
label=<_content_abc...blob value >]
// Ops detail
- ops [shape=box, style="dashed,filled", fillcolor="#f0f0f0",
- label=<{Patch Operations (schema:2)|{
- NodeAdd(id, dot) |
- NodeTombstone(id, dots) |
- EdgeAdd(from, to, label, dot) |
- EdgeTombstone(key, dots) |
- PropSet(key, val, eventId) |
- BlobValue(oid, data)
- }}>]
+ ops [shape=none, margin=0,
+ label=<
+ Patch Operations (schema:2)
+ NodeAdd(id, dot)
+ NodeTombstone(id, dots)
+ EdgeAdd(from, to, label, dot)
+ EdgeTombstone(key, dots)
+ PropSet(key, val, eventId)
+ BlobValue(oid, data)
+
>]
// Connections
commit -> parent [label="parent"]
diff --git a/docs/diagrams/fig-patch-anatomy.svg b/docs/diagrams/fig-patch-anatomy.svg
index 05d55854..d9495901 100644
--- a/docs/diagrams/fig-patch-anatomy.svg
+++ b/docs/diagrams/fig-patch-anatomy.svg
@@ -4,126 +4,126 @@
-
-
+
+
patch_anatomy
-Fig. 4. Patch Commit Anatomy — a single WARP patch commit
+Fig. 4. Patch Commit Anatomy — a single WARP patch commit
commit
-
-Patch Commit
-f1a2b3c4d5e6f7a
-
-tree:
-a9b8c7d...
-
-parent:
-d4e5f6a...
-
-
-Trailers:
-
-eg-kind: patch
-
-eg-graph: myGraph
-
-eg-writer: alice
-
-eg-lamport: 42
-
-eg-schema: 2
-
-eg-patch-oid: b3c4d5e...
-
-eg-state-hash: e6f7a8b...
-
-eg-frontier-oid: c7d8e9f...
-
+
+
+Patch Commit
+f1a2b3c
+
+tree:
+a9b8c7d...
+
+parent:
+d4e5f6a...
+
+Trailers:
+
+eg-kind: patch
+
+eg-graph: myGraph
+
+eg-writer: alice
+
+eg-lamport: 42
+
+eg-schema: 2
+
+eg-patch-oid: b3c4d5e...
+
+eg-state-hash: e6f7a8b...
+
+eg-frontier-oid: c7d8e9f...
+
parent
-
-Parent Commit
-d4e5f6a...
+
+Parent Commit
+d4e5f6a...
commit->parent
-
-
-parent
+
+
+parent
tree
-
-tree
-a9b8c7d...
+
+tree
+a9b8c7d...
commit->tree
-
-
-tree
+
+
+tree
patch_cbor
-
-patch.cbor
-CBOR-encoded ops
+
+patch.cbor
+CBOR-encoded ops
tree->patch_cbor
-
-
-entry
+
+
+entry
content_1
-
-_content_abc...
-blob value
+
+_content_abc...
+blob value
tree->content_1
-
-
-entry
+
+
+entry
ops
-
-{
-Patch Operations
- (schema:2)|{
-NodeAdd(id, dot)
- |
-NodeTombstone(id, dots)
- |
-EdgeAdd(from, to, label, dot)
- |
-EdgeTombstone(key, dots)
- |
-PropSet(key, val, eventId)
- |
-BlobValue(oid, data)
- }}
+
+
+Patch Operations
+ (schema:2)
+
+NodeAdd(id, dot)
+
+NodeTombstone(id, dots)
+
+EdgeAdd(from, to, label, dot)
+
+EdgeTombstone(key, dots)
+
+PropSet(key, val, eventId)
+
+BlobValue(oid, data)
patch_cbor->ops
-
-
-decodes to
+
+
+decodes to
diff --git a/docs/diagrams/fig-ref-layout.dot b/docs/diagrams/fig-ref-layout.dot
index d38879b0..3b1290b5 100644
--- a/docs/diagrams/fig-ref-layout.dot
+++ b/docs/diagrams/fig-ref-layout.dot
@@ -73,6 +73,21 @@ digraph ref_layout {
audit -> audit_bob
trust -> trust_rec
+ // Elision nodes (leaf refs without explicit commit targets)
+ elision_cur [shape=plaintext, fontsize=9, label="..."]
+ elision_bookmark [shape=plaintext, fontsize=9, label="..."]
+ elision_audit_a [shape=plaintext, fontsize=9, label="..."]
+ elision_audit_b [shape=plaintext, fontsize=9, label="..."]
+ elision_trust [shape=plaintext, fontsize=9, label="..."]
+ elision_cache [shape=plaintext, fontsize=9, label="..."]
+
+ cur_active -> elision_cur [style=dashed, penwidth=0.5]
+ bookmark -> elision_bookmark [style=dashed, penwidth=0.5]
+ audit_alice -> elision_audit_a [style=dashed, penwidth=0.5]
+ audit_bob -> elision_audit_b [style=dashed, penwidth=0.5]
+ trust_rec -> elision_trust [style=dashed, penwidth=0.5]
+ seek_cache -> elision_cache [style=dashed, penwidth=0.5]
+
// Ref-to-commit arrows (dashed)
alice_ref -> c_alice [style=dashed, penwidth=0.5]
bob_ref -> c_bob [style=dashed, penwidth=0.5]
diff --git a/docs/diagrams/fig-ref-layout.svg b/docs/diagrams/fig-ref-layout.svg
index 30eca800..e30349dd 100644
--- a/docs/diagrams/fig-ref-layout.svg
+++ b/docs/diagrams/fig-ref-layout.svg
@@ -4,310 +4,376 @@
-
+
ref_layout
-Fig. 3. Ref Layout — the refs/warp/ namespace
+Fig. 3. Ref Layout — the refs/warp/ namespace
refs
-
-refs/
+
+refs/
warp
-
-warp/
+
+warp/
refs->warp
-
-
+
+
graphdir
-
-<graphName>/
+
+<graphName>/
warp->graphdir
-
-
+
+
writers
-
-writers/
+
+writers/
graphdir->writers
-
-
+
+
checkpoints
-
-checkpoints/
+
+checkpoints/
graphdir->checkpoints
-
-
+
+
coverage
-
-coverage/
+
+coverage/
graphdir->coverage
-
-
+
+
cursor
-
-cursor/
+
+cursor/
graphdir->cursor
-
-
+
+
audit
-
-audit/
+
+audit/
graphdir->audit
-
-
+
+
trust
-
-trust/
+
+trust/
graphdir->trust
-
-
+
+
seek_cache
-
-seek-cache
+
+seek-cache
graphdir->seek_cache
-
-
+
+
alice_ref
-
-alice
+
+alice
writers->alice_ref
-
-
+
+
bob_ref
-
-bob
+
+bob
writers->bob_ref
-
-
+
+
ckpt_head
-
-head
+
+head
checkpoints->ckpt_head
-
-
+
+
cov_head
-
-head
+
+head
coverage->cov_head
-
-
+
+
saved
-
-saved/
+
+saved/
cursor->saved
-
-
+
+
cur_active
-
-active
+
+active
cursor->cur_active
-
-
+
+
bookmark
-
-bookmark
+
+bookmark
saved->bookmark
-
-
+
+
audit_alice
-
-alice
+
+alice
audit->audit_alice
-
-
+
+
audit_bob
-
-bob
+
+bob
audit->audit_bob
-
-
+
+
trust_rec
-
-records
+
+records
trust->trust_rec
-
-
+
+
c_alice
-
-patch commit
-a1b2c3d
+
+patch commit
+a1b2c3d
-
+
alice_ref->c_alice
-
-
+
+
c_bob
-
-patch commit
-d4e5f6a
+
+patch commit
+d4e5f6a
-
+
bob_ref->c_bob
-
-
+
+
c_ckpt
-
-checkpoint
-7c8d9e0
+
+checkpoint
+7c8d9e0
-
+
ckpt_head->c_ckpt
-
-
+
+
c_cov
-
-octopus merge
-f1a2b3c
+
+octopus merge
+f1a2b3c
-
+
cov_head->c_cov
-
-
+
+
-
+
+
+elision_cur
+...
+
+
+
+cur_active->elision_cur
+
+
+
+
+
+elision_bookmark
+...
+
+
+
+bookmark->elision_bookmark
+
+
+
+
+
+elision_audit_a
+...
+
+
+
+audit_alice->elision_audit_a
+
+
+
+
+
+elision_audit_b
+...
+
+
+
+audit_bob->elision_audit_b
+
+
+
+
+
+elision_trust
+...
+
+
+trust_rec->elision_trust
+
+
+
+
+
+elision_cache
+...
+
+
+
+seek_cache->elision_cache
+
+
+
+
+
c_cov->c_alice
-
-
-parent
+
+
+parent
-
+
c_cov->c_bob
-
-
-parent
+
+
+parent
diff --git a/docs/diagrams/fig-two-plane.dot b/docs/diagrams/fig-two-plane.dot
index f3caf2e5..192f773e 100644
--- a/docs/diagrams/fig-two-plane.dot
+++ b/docs/diagrams/fig-two-plane.dot
@@ -71,6 +71,8 @@ digraph two_plane {
v1 -> a_v1 [style=dashed, penwidth=0.5, color="#606060", label="α", fontsize=8]
v2 -> a_v2 [style=dashed, penwidth=0.5, color="#606060", label="α", fontsize=8]
v3 -> a_v3 [style=dashed, penwidth=0.5, color="#606060", label="α", fontsize=8]
- v1 -> b_e12 [style=dashed, penwidth=0.5, color="#606060", label="β", fontsize=8,
+ // Graphviz cannot source edges from edges; label clarifies the semantic
+ v1 -> b_e12 [style=dashed, penwidth=0.5, color="#606060",
+ label=<β(e12 )>, fontsize=8,
ltail=cluster_skeleton]
}
diff --git a/docs/diagrams/fig-two-plane.svg b/docs/diagrams/fig-two-plane.svg
index 1509cb8d..c900cf69 100644
--- a/docs/diagrams/fig-two-plane.svg
+++ b/docs/diagrams/fig-two-plane.svg
@@ -74,7 +74,9 @@
v1->b_e12
-β
+β(e
+12
+)
diff --git a/docs/diagrams/style.dot b/docs/diagrams/style.dot
index cc04d07c..b50ebe76 100644
--- a/docs/diagrams/style.dot
+++ b/docs/diagrams/style.dot
@@ -2,8 +2,7 @@
// style.dot — Shared style reference for AION diagram suite
// =============================================================
// Not directly #included by Graphviz. Each .dot file copies the
-// relevant defaults from this reference. The build script
-// (scripts/build-diagrams.sh) may also inject these via cat.
+// relevant defaults from this reference.
//
// Two visual vocabularies:
// (i) Git object diagrams — commits, trees, blobs, refs
diff --git a/eslint.config.js b/eslint.config.js
index 2273e6b9..9ddfba97 100644
--- a/eslint.config.js
+++ b/eslint.config.js
@@ -203,7 +203,6 @@ export default tseslint.config(
"src/domain/warp/subscribe.methods.js",
"src/domain/warp/provenance.methods.js",
"src/domain/warp/fork.methods.js",
- "src/domain/warp/sync.methods.js",
"src/domain/warp/checkpoint.methods.js",
"src/domain/warp/patch.methods.js",
"src/domain/warp/materialize.methods.js",
@@ -216,6 +215,7 @@ export default tseslint.config(
"src/domain/services/CheckpointMessageCodec.js",
"src/domain/services/AnchorMessageCodec.js",
"src/domain/services/MessageSchemaDetector.js",
+ "src/domain/services/SyncController.js",
"src/domain/services/SyncProtocol.js",
"src/domain/services/LogicalTraversal.js",
"src/domain/services/StateSerializerV5.js",
diff --git a/jsr.json b/jsr.json
index 14a521b7..0d6cb018 100644
--- a/jsr.json
+++ b/jsr.json
@@ -1,6 +1,6 @@
{
"name": "@git-stunts/git-warp",
- "version": "11.5.0",
+ "version": "11.5.1",
"imports": {
"roaring": "npm:roaring@^2.7.0"
},
diff --git a/package.json b/package.json
index d47c4b0e..a048c154 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "@git-stunts/git-warp",
- "version": "11.5.0",
+ "version": "11.5.1",
"description": "Deterministic WARP graph over Git: graph-native storage, traversal, and tooling.",
"type": "module",
"license": "Apache-2.0",
diff --git a/scripts/build-diagrams.sh b/scripts/build-diagrams.sh
index 33a04968..e16f9e78 100755
--- a/scripts/build-diagrams.sh
+++ b/scripts/build-diagrams.sh
@@ -24,11 +24,11 @@ for dotfile in "$DIAGRAM_DIR"/fig-*.dot; do
printf " %-35s → " "$name"
- if dot -Tsvg "$dotfile" -o "$svgfile" 2>/dev/null; then
- # Post-process: strip white background fills for true transparency
- # Targets the top-level polygon that Graphviz adds as a page background
- sed -i '' 's/fill="white"/fill="none"/g' "$svgfile"
-
+ warnings=$(dot -Tsvg "$dotfile" -o "$svgfile" 2>&1) && rc=0 || rc=$?
+ if [ -n "$warnings" ]; then
+ echo " WARN ($name): $warnings" >&2
+ fi
+ if [ "$rc" -eq 0 ]; then
size=$(wc -c < "$svgfile" | tr -d ' ')
size_kb=$((size / 1024))
printf "%s (%d KB)\n" "$(basename "$svgfile")" "$size_kb"
diff --git a/src/domain/WarpGraph.js b/src/domain/WarpGraph.js
index 63c9baf9..e8519c44 100644
--- a/src/domain/WarpGraph.js
+++ b/src/domain/WarpGraph.js
@@ -18,12 +18,12 @@ import defaultCrypto from './utils/defaultCrypto.js';
import defaultClock from './utils/defaultClock.js';
import LogicalTraversal from './services/LogicalTraversal.js';
import LRUCache from './utils/LRUCache.js';
+import SyncController from './services/SyncController.js';
import { wireWarpMethods } from './warp/_wire.js';
import * as queryMethods from './warp/query.methods.js';
import * as subscribeMethods from './warp/subscribe.methods.js';
import * as provenanceMethods from './warp/provenance.methods.js';
import * as forkMethods from './warp/fork.methods.js';
-import * as syncMethods from './warp/sync.methods.js';
import * as checkpointMethods from './warp/checkpoint.methods.js';
import * as patchMethods from './warp/patch.methods.js';
import * as materializeMethods from './warp/materialize.methods.js';
@@ -172,6 +172,9 @@ export default class WarpGraph {
/** @type {number} */
this._auditSkipCount = 0;
+
+ /** @type {SyncController} */
+ this._syncController = new SyncController(this);
}
/**
@@ -410,9 +413,26 @@ wireWarpMethods(WarpGraph, [
subscribeMethods,
provenanceMethods,
forkMethods,
- syncMethods,
checkpointMethods,
patchMethods,
materializeMethods,
materializeAdvancedMethods,
]);
+
+// ── Sync methods: direct delegation to SyncController (no stub file) ────────
+const syncDelegates = /** @type {const} */ ([
+ 'getFrontier', 'hasFrontierChanged', 'status',
+ 'createSyncRequest', 'processSyncRequest', 'applySyncResponse',
+ 'syncNeeded', 'syncWith', 'serve',
+]);
+for (const method of syncDelegates) {
+ Object.defineProperty(WarpGraph.prototype, method, {
+ // eslint-disable-next-line object-shorthand -- function keyword needed for `this` binding
+ value: /** @this {WarpGraph} @param {*[]} args */ function (...args) {
+ return this._syncController[method](...args);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false,
+ });
+}
diff --git a/src/domain/services/BitmapIndexReader.js b/src/domain/services/BitmapIndexReader.js
index be8a6b37..3f9765d3 100644
--- a/src/domain/services/BitmapIndexReader.js
+++ b/src/domain/services/BitmapIndexReader.js
@@ -4,6 +4,7 @@ import nullLogger from '../utils/nullLogger.js';
import LRUCache from '../utils/LRUCache.js';
import { getRoaringBitmap32 } from '../utils/roaring.js';
import { canonicalStringify } from '../utils/canonicalStringify.js';
+import { isValidShardOid } from '../utils/validateShardOid.js';
/** @typedef {import('../../ports/IndexStoragePort.js').default} IndexStoragePort */
/** @typedef {import('../types/WarpPersistence.js').IndexStorage} IndexStorage */
@@ -50,24 +51,24 @@ const computeChecksum = async (data, version, crypto) => {
* - {@link ShardCorruptionError} for invalid shard format
* - {@link ShardValidationError} for version or checksum mismatches
*
- * In non-strict mode (default), validation failures are logged as warnings
+ * In non-strict mode (strict: false), validation failures are logged as warnings
* and an empty shard is returned for graceful degradation.
*
* **Note**: Storage errors (e.g., `storage.readBlob` failures) always throw
* {@link ShardLoadError} regardless of strict mode.
*
* @example
- * // Non-strict mode (default) - graceful degradation on validation errors
+ * // Strict mode (default) - throws on any validation failure
* const reader = new BitmapIndexReader({ storage });
* reader.setup(shardOids);
* const parents = await reader.getParents('abc123...');
*
* @example
- * // Strict mode - throws on any validation failure
- * const strictReader = new BitmapIndexReader({ storage, strict: true });
- * strictReader.setup(shardOids);
+ * // Non-strict mode - graceful degradation on validation errors
+ * const lenientReader = new BitmapIndexReader({ storage, strict: false });
+ * lenientReader.setup(shardOids);
* try {
- * const parents = await strictReader.getParents('abc123...');
+ * const parents = await lenientReader.getParents('abc123...');
* } catch (err) {
* if (err instanceof ShardValidationError) {
* console.error('Shard validation failed:', err.field, err.expected, err.actual);
@@ -83,14 +84,14 @@ export default class BitmapIndexReader {
* Creates a BitmapIndexReader instance.
* @param {Object} options
* @param {IndexStoragePort} options.storage - Storage adapter for reading index data
- * @param {boolean} [options.strict=false] - If true, throw errors on validation failures; if false, log warnings and return empty shards
+ * @param {boolean} [options.strict=true] - If true, throw errors on validation failures; if false, log warnings and return empty shards
* @param {import('../../ports/LoggerPort.js').default} [options.logger] - Logger for structured logging.
* Defaults to NoOpLogger (no logging).
* @param {number} [options.maxCachedShards=100] - Maximum number of shards to keep in the LRU cache.
* When exceeded, least recently used shards are evicted to free memory.
* @param {import('../../ports/CryptoPort.js').default} [options.crypto] - CryptoPort instance for checksum verification.
*/
- constructor({ storage, strict = false, logger = nullLogger, maxCachedShards = DEFAULT_MAX_CACHED_SHARDS, crypto } = /** @type {{ storage: IndexStoragePort, strict?: boolean, logger?: LoggerPort, maxCachedShards?: number, crypto?: CryptoPort }} */ ({})) {
+ constructor({ storage, strict = true, logger = nullLogger, maxCachedShards = DEFAULT_MAX_CACHED_SHARDS, crypto } = /** @type {{ storage: IndexStoragePort, strict?: boolean, logger?: LoggerPort, maxCachedShards?: number, crypto?: CryptoPort }} */ ({})) {
if (!storage) {
throw new Error('BitmapIndexReader requires a storage adapter');
}
@@ -132,8 +133,29 @@ export default class BitmapIndexReader {
* const parents = await reader.getParents('abcd1234...'); // loads meta_ab, shards_rev_ab
*/
setup(shardOids) {
- this.shardOids = new Map(Object.entries(shardOids));
- this._idToShaCache = null; // Clear cache when shards change
+ const entries = Object.entries(shardOids);
+ /** @type {[string, string][]} */
+ const validEntries = [];
+ for (const [path, oid] of entries) {
+ if (isValidShardOid(oid)) {
+ validEntries.push([path, oid]);
+ } else if (this.strict) {
+ throw new ShardCorruptionError('Invalid shard OID', {
+ shardPath: path,
+ oid,
+ reason: 'invalid_oid',
+ });
+ } else {
+ this.logger.warn('Skipping shard with invalid OID', {
+ operation: 'setup',
+ shardPath: path,
+ oid,
+ reason: 'invalid_oid',
+ });
+ }
+ }
+ this.shardOids = new Map(validEntries);
+ this._idToShaCache = null;
this.loadedShards.clear();
}
diff --git a/src/domain/services/JoinReducer.js b/src/domain/services/JoinReducer.js
index 0aa2fa43..a496d728 100644
--- a/src/domain/services/JoinReducer.js
+++ b/src/domain/services/JoinReducer.js
@@ -342,48 +342,55 @@ function foldPatchDot(frontier, writer, lamport) {
}
/**
- * Joins a patch into state, applying all operations in order.
- *
- * This is the primary function for incorporating a single patch into WARP state.
- * It iterates through all operations in the patch, creates EventIds for causality
- * tracking, and applies each operation using `applyOpV2`.
- *
- * **Receipt Collection Mode**:
- * When `collectReceipts` is true, this function also computes the outcome of each
- * operation (applied, redundant, or superseded) and returns a TickReceipt for
- * provenance tracking. This has a small performance cost, so it's disabled by default.
- *
- * **Warning**: This function mutates `state` in place. For immutable operations,
- * clone the state first using `cloneStateV5()`.
+ * Merges a patch's context into state and folds the patch dot.
+ * @param {WarpStateV5} state
+ * @param {Object} patch
+ * @param {string} patch.writer
+ * @param {number} patch.lamport
+ * @param {Map|{[x: string]: number}} patch.context
+ */
+function updateFrontierFromPatch(state, patch) {
+ const contextVV = patch.context instanceof Map
+ ? patch.context
+ : vvDeserialize(patch.context || {});
+ state.observedFrontier = vvMerge(state.observedFrontier, contextVV);
+ foldPatchDot(state.observedFrontier, patch.writer, patch.lamport);
+}
+
+/**
+ * Applies a patch to state without receipt collection (zero overhead).
*
- * @param {WarpStateV5} state - The state to mutate. Modified in place.
+ * @param {WarpStateV5} state - The state to mutate in place
* @param {Object} patch - The patch to apply
- * @param {string} patch.writer - Writer ID who created this patch
- * @param {number} patch.lamport - Lamport timestamp of this patch
- * @param {Array<{type: string, node?: string, dot?: import('../crdt/Dot.js').Dot, observedDots?: string[], from?: string, to?: string, label?: string, key?: string, value?: unknown, oid?: string}>} patch.ops - Array of operations to apply
- * @param {Map|{[x: string]: number}} patch.context - Version vector context (Map or serialized form)
- * @param {string} patchSha - The Git SHA of the patch commit (used for EventId creation)
- * @param {boolean} [collectReceipts=false] - When true, computes and returns receipt data
- * @returns {WarpStateV5|{state: WarpStateV5, receipt: import('../types/TickReceipt.js').TickReceipt}}
- * Returns mutated state directly when collectReceipts is false;
- * returns {state, receipt} object when collectReceipts is true
+ * @param {string} patch.writer
+ * @param {number} patch.lamport
+ * @param {Array<{type: string, node?: string, dot?: import('../crdt/Dot.js').Dot, observedDots?: string[], from?: string, to?: string, label?: string, key?: string, value?: unknown, oid?: string}>} patch.ops
+ * @param {Map|{[x: string]: number}} patch.context
+ * @param {string} patchSha - Git SHA of the patch commit
+ * @returns {WarpStateV5} The mutated state
*/
-export function join(state, patch, patchSha, collectReceipts) {
- // ZERO-COST: when collectReceipts is falsy, skip all receipt logic
- if (!collectReceipts) {
- for (let i = 0; i < patch.ops.length; i++) {
- const eventId = createEventId(patch.lamport, patch.writer, patchSha, i);
- applyOpV2(state, patch.ops[i], eventId);
- }
- const contextVV = patch.context instanceof Map
- ? patch.context
- : vvDeserialize(patch.context);
- state.observedFrontier = vvMerge(state.observedFrontier, contextVV);
- foldPatchDot(state.observedFrontier, patch.writer, patch.lamport);
- return state;
+export function applyFast(state, patch, patchSha) {
+ for (let i = 0; i < patch.ops.length; i++) {
+ const eventId = createEventId(patch.lamport, patch.writer, patchSha, i);
+ applyOpV2(state, patch.ops[i], eventId);
}
+ updateFrontierFromPatch(state, patch);
+ return state;
+}
- // Receipt-enabled path
+/**
+ * Applies a patch to state with receipt collection for provenance tracking.
+ *
+ * @param {WarpStateV5} state - The state to mutate in place
+ * @param {Object} patch - The patch to apply
+ * @param {string} patch.writer
+ * @param {number} patch.lamport
+ * @param {Array<{type: string, node?: string, dot?: import('../crdt/Dot.js').Dot, observedDots?: string[], from?: string, to?: string, label?: string, key?: string, value?: unknown, oid?: string}>} patch.ops
+ * @param {Map|{[x: string]: number}} patch.context
+ * @param {string} patchSha - Git SHA of the patch commit
+ * @returns {{state: WarpStateV5, receipt: import('../types/TickReceipt.js').TickReceipt}}
+ */
+export function applyWithReceipt(state, patch, patchSha) {
/** @type {import('../types/TickReceipt.js').OpOutcome[]} */
const opResults = [];
for (let i = 0; i < patch.ops.length; i++) {
@@ -433,11 +440,7 @@ export function join(state, patch, patchSha, collectReceipts) {
opResults.push(entry);
}
- const contextVV = patch.context instanceof Map
- ? patch.context
- : vvDeserialize(patch.context);
- state.observedFrontier = vvMerge(state.observedFrontier, contextVV);
- foldPatchDot(state.observedFrontier, patch.writer, patch.lamport);
+ updateFrontierFromPatch(state, patch);
const receipt = createTickReceipt({
patchSha,
@@ -449,6 +452,39 @@ export function join(state, patch, patchSha, collectReceipts) {
return { state, receipt };
}
+/**
+ * Joins a patch into state, applying all operations in order.
+ *
+ * This is the primary function for incorporating a single patch into WARP state.
+ * It iterates through all operations in the patch, creates EventIds for causality
+ * tracking, and applies each operation using `applyOpV2`.
+ *
+ * **Receipt Collection Mode**:
+ * When `collectReceipts` is true, this function also computes the outcome of each
+ * operation (applied, redundant, or superseded) and returns a TickReceipt for
+ * provenance tracking. This has a small performance cost, so it's disabled by default.
+ *
+ * **Warning**: This function mutates `state` in place. For immutable operations,
+ * clone the state first using `cloneStateV5()`.
+ *
+ * @param {WarpStateV5} state - The state to mutate. Modified in place.
+ * @param {Object} patch - The patch to apply
+ * @param {string} patch.writer - Writer ID who created this patch
+ * @param {number} patch.lamport - Lamport timestamp of this patch
+ * @param {Array<{type: string, node?: string, dot?: import('../crdt/Dot.js').Dot, observedDots?: string[], from?: string, to?: string, label?: string, key?: string, value?: unknown, oid?: string}>} patch.ops - Array of operations to apply
+ * @param {Map|{[x: string]: number}} patch.context - Version vector context (Map or serialized form)
+ * @param {string} patchSha - The Git SHA of the patch commit (used for EventId creation)
+ * @param {boolean} [collectReceipts=false] - When true, computes and returns receipt data
+ * @returns {WarpStateV5|{state: WarpStateV5, receipt: import('../types/TickReceipt.js').TickReceipt}}
+ * Returns mutated state directly when collectReceipts is false;
+ * returns {state, receipt} object when collectReceipts is true
+ */
+export function join(state, patch, patchSha, collectReceipts) {
+ return collectReceipts
+ ? applyWithReceipt(state, patch, patchSha)
+ : applyFast(state, patch, patchSha);
+}
+
/**
* Joins two V5 states together using CRDT merge semantics.
*
@@ -560,14 +596,14 @@ export function reduceV5(patches, initialState, options) {
if (options && options.receipts) {
const receipts = [];
for (const { patch, sha } of patches) {
- const result = /** @type {{state: WarpStateV5, receipt: import('../types/TickReceipt.js').TickReceipt}} */ (join(state, patch, sha, true));
+ const result = applyWithReceipt(state, patch, sha);
receipts.push(result.receipt);
}
return { state, receipts };
}
for (const { patch, sha } of patches) {
- join(state, patch, sha);
+ applyFast(state, patch, sha);
}
return state;
}
diff --git a/src/domain/services/SyncController.js b/src/domain/services/SyncController.js
new file mode 100644
index 00000000..fe0f92a9
--- /dev/null
+++ b/src/domain/services/SyncController.js
@@ -0,0 +1,576 @@
+/**
+ * SyncController - Encapsulates all sync functionality for WarpGraph.
+ *
+ * Extracted from the original sync.methods.js free functions into a
+ * service class. WarpGraph.prototype delegates directly to this controller
+ * via defineProperty loops — no intermediate stub file.
+ *
+ * @module domain/services/SyncController
+ */
+
+import SyncError from '../errors/SyncError.js';
+import OperationAbortedError from '../errors/OperationAbortedError.js';
+import { QueryError, E_NO_STATE_MSG } from '../warp/_internal.js';
+import {
+ createSyncRequest as createSyncRequestImpl,
+ processSyncRequest as processSyncRequestImpl,
+ applySyncResponse as applySyncResponseImpl,
+ syncNeeded as syncNeededImpl,
+} from './SyncProtocol.js';
+import { retry, timeout, RetryExhaustedError, TimeoutError } from '@git-stunts/alfred';
+import { checkAborted } from '../utils/cancellation.js';
+import { createFrontier, updateFrontier } from './Frontier.js';
+import { buildWriterRef } from '../utils/RefLayout.js';
+import { collectGCMetrics } from './GCMetrics.js';
+import HttpSyncServer from './HttpSyncServer.js';
+import { signSyncRequest, canonicalizePath } from './SyncAuthService.js';
+import { isError } from '../types/WarpErrors.js';
+
+/** @typedef {import('../types/WarpPersistence.js').CorePersistence} CorePersistence */
+
+/**
+ * The host interface that SyncController depends on.
+ *
+ * Documents the exact WarpGraph surface the controller accesses,
+ * making the coupling explicit and enabling lightweight mock hosts
+ * in unit tests.
+ *
+ * @typedef {Object} SyncHost
+ * @property {import('../services/JoinReducer.js').WarpStateV5|null} _cachedState
+ * @property {Map|null} _lastFrontier
+ * @property {boolean} _stateDirty
+ * @property {number} _patchesSinceGC
+ * @property {string} _graphName
+ * @property {CorePersistence} _persistence
+ * @property {import('../../ports/ClockPort.js').default} _clock
+ * @property {import('../../ports/CodecPort.js').default} _codec
+ * @property {import('../../ports/CryptoPort.js').default} _crypto
+ * @property {import('../../ports/LoggerPort.js').default|null} _logger
+ * @property {number} _patchesSinceCheckpoint
+ * @property {(op: string, t0: number, opts?: {metrics?: string, error?: Error}) => void} _logTiming
+ * @property {(options?: Record) => Promise} materialize
+ * @property {() => Promise} discoverWriters
+ */
+
+// ── Constants ───────────────────────────────────────────────────────────────
+
+const DEFAULT_SYNC_SERVER_MAX_BYTES = 4 * 1024 * 1024;
+const DEFAULT_SYNC_WITH_RETRIES = 3;
+const DEFAULT_SYNC_WITH_BASE_DELAY_MS = 250;
+const DEFAULT_SYNC_WITH_MAX_DELAY_MS = 2000;
+const DEFAULT_SYNC_WITH_TIMEOUT_MS = 10_000;
+
+// ── Private helpers ─────────────────────────────────────────────────────────
+
+/**
+ * Compares two string→string Maps for value equality without allocating.
+ *
+ * @param {Map} a
+ * @param {Map} b
+ * @returns {boolean} True if every key in `a` has the same value in `b`
+ */
+function mapsEqual(a, b) {
+ for (const [k, v] of a) {
+ if (b.get(k) !== v) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Normalizes a sync endpoint path to ensure it starts with '/'.
+ * Returns '/sync' if no path is provided.
+ *
+ * @param {string|undefined|null} path - The sync path to normalize
+ * @returns {string} Normalized path starting with '/'
+ */
+function normalizeSyncPath(path) {
+ if (!path) {
+ return '/sync';
+ }
+ return path.startsWith('/') ? path : `/${path}`;
+}
+
+/**
+ * Builds auth headers for an outgoing sync request if auth is configured.
+ *
+ * @param {Object} params
+ * @param {{ secret: string, keyId?: string }|undefined} params.auth
+ * @param {string} params.bodyStr - Serialized request body
+ * @param {URL} params.targetUrl
+ * @param {import('../../ports/CryptoPort.js').default} params.crypto
+ * @returns {Promise>}
+ */
+async function buildSyncAuthHeaders({ auth, bodyStr, targetUrl, crypto }) {
+ if (!auth || !auth.secret) {
+ return {};
+ }
+ const bodyBuf = new TextEncoder().encode(bodyStr);
+ return await signSyncRequest(
+ {
+ method: 'POST',
+ path: canonicalizePath(targetUrl.pathname + (targetUrl.search || '')),
+ contentType: 'application/json',
+ body: bodyBuf,
+ secret: auth.secret,
+ keyId: auth.keyId || 'default',
+ },
+ { crypto },
+ );
+}
+
+// ── SyncController ──────────────────────────────────────────────────────────
+
+/**
+ * Encapsulates all sync-related operations for a WarpGraph instance.
+ */
+export default class SyncController {
+ /**
+ * @param {SyncHost} host - The WarpGraph instance (or any object satisfying SyncHost)
+ */
+ constructor(host) {
+ /** @type {SyncHost} */
+ this._host = host;
+ }
+
+ /**
+ * Returns the current frontier -- a Map of writerId -> tip SHA.
+ *
+ * @returns {Promise>} Frontier map
+ * @throws {Error} If listing refs fails
+ */
+ async getFrontier() {
+ const writerIds = await this._host.discoverWriters();
+ const frontier = createFrontier();
+
+ for (const writerId of writerIds) {
+ const writerRef = buildWriterRef(this._host._graphName, writerId);
+ const tipSha = await this._host._persistence.readRef(writerRef);
+ if (tipSha) {
+ updateFrontier(frontier, writerId, tipSha);
+ }
+ }
+
+ return frontier;
+ }
+
+ /**
+ * Checks whether any writer tip has changed since the last materialize.
+ *
+ * O(writers) comparison of stored writer tip SHAs against current refs.
+ * Cheap "has anything changed?" check without materialization.
+ *
+ * @returns {Promise} True if frontier has changed (or never materialized)
+ * @throws {Error} If listing refs fails
+ */
+ async hasFrontierChanged() {
+ if (this._host._lastFrontier === null) {
+ return true;
+ }
+
+ const current = await this.getFrontier();
+
+ if (current.size !== this._host._lastFrontier.size) {
+ return true;
+ }
+
+ for (const [writerId, tipSha] of current) {
+ if (this._host._lastFrontier.get(writerId) !== tipSha) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ /**
+ * Returns a lightweight status snapshot of the graph's operational state.
+ *
+ * This method is O(writers) and does NOT trigger materialization.
+ *
+ * @returns {Promise<{
+ * cachedState: 'fresh' | 'stale' | 'none',
+ * patchesSinceCheckpoint: number,
+ * tombstoneRatio: number,
+ * writers: number,
+ * frontier: Record,
+ * }>} The graph status
+ * @throws {Error} If listing refs fails
+ */
+ async status() {
+ // Fetch frontier once, reuse for both staleness check and return value
+ const frontier = await this.getFrontier();
+
+ // Determine cachedState
+ /** @type {'fresh' | 'stale' | 'none'} */
+ let cachedState;
+ if (this._host._cachedState === null) {
+ cachedState = 'none';
+ } else if (this._host._stateDirty || !this._host._lastFrontier ||
+ frontier.size !== this._host._lastFrontier.size ||
+ !mapsEqual(frontier, this._host._lastFrontier)) {
+ cachedState = 'stale';
+ } else {
+ cachedState = 'fresh';
+ }
+
+ // patchesSinceCheckpoint
+ const patchesSinceCheckpoint = this._host._patchesSinceCheckpoint;
+
+ // tombstoneRatio
+ let tombstoneRatio = 0;
+ if (this._host._cachedState) {
+ const metrics = collectGCMetrics(this._host._cachedState);
+ tombstoneRatio = metrics.tombstoneRatio;
+ }
+
+ // writers
+ const writers = frontier.size;
+
+ // Convert frontier Map to plain object
+ const frontierObj = Object.fromEntries(frontier);
+
+ return {
+ cachedState,
+ patchesSinceCheckpoint,
+ tombstoneRatio,
+ writers,
+ frontier: frontierObj,
+ };
+ }
+
+ /**
+ * Creates a sync request to send to a remote peer.
+ * The request contains the local frontier for comparison.
+ *
+ * @returns {Promise} The sync request
+ * @throws {Error} If listing refs fails
+ */
+ async createSyncRequest() {
+ const frontier = await this.getFrontier();
+ return createSyncRequestImpl(frontier);
+ }
+
+ /**
+ * Processes an incoming sync request and returns patches the requester needs.
+ *
+ * @param {import('./SyncProtocol.js').SyncRequest} request - The incoming sync request
+ * @returns {Promise} The sync response
+ * @throws {Error} If listing refs or reading patches fails
+ */
+ async processSyncRequest(request) {
+ const localFrontier = await this.getFrontier();
+ /** @type {CorePersistence} */
+ const persistence = this._host._persistence;
+ return await processSyncRequestImpl(
+ request,
+ localFrontier,
+ persistence,
+ this._host._graphName,
+ { codec: this._host._codec }
+ );
+ }
+
+ /**
+ * Applies a sync response to the local graph state.
+ * Updates the cached state with received patches.
+ *
+ * **Requires a cached state.**
+ *
+ * @param {import('./SyncProtocol.js').SyncResponse} response - The sync response
+ * @returns {{state: import('./JoinReducer.js').WarpStateV5, frontier: Map, applied: number}} Result with updated state and frontier
+ * @throws {import('../errors/QueryError.js').default} If no cached state exists (code: `E_NO_STATE`)
+ */
+ applySyncResponse(response) {
+ if (!this._host._cachedState) {
+ throw new QueryError(E_NO_STATE_MSG, {
+ code: 'E_NO_STATE',
+ });
+ }
+
+ const currentFrontier = this._host._lastFrontier || createFrontier();
+ const result = /** @type {{state: import('./JoinReducer.js').WarpStateV5, frontier: Map, applied: number}} */ (applySyncResponseImpl(response, this._host._cachedState, currentFrontier));
+
+ // Update cached state
+ this._host._cachedState = result.state;
+
+ // Keep _lastFrontier in sync so hasFrontierChanged() won't misreport stale.
+ this._host._lastFrontier = result.frontier;
+
+ // Track patches for GC
+ this._host._patchesSinceGC += result.applied;
+
+ // State is now in sync with the frontier -- clear dirty flag
+ this._host._stateDirty = false;
+
+ return result;
+ }
+
+ /**
+ * Checks if sync is needed with a remote frontier.
+ *
+ * @param {Map} remoteFrontier - The remote peer's frontier
+ * @returns {Promise} True if sync would transfer any patches
+ * @throws {Error} If listing refs fails
+ */
+ async syncNeeded(remoteFrontier) {
+ const localFrontier = await this.getFrontier();
+ return syncNeededImpl(localFrontier, remoteFrontier);
+ }
+
+ /**
+ * Syncs with a remote peer (HTTP or direct graph instance).
+ *
+ * @param {string|import('../WarpGraph.js').default} remote - URL or peer graph instance
+ * @param {Object} [options]
+ * @param {string} [options.path='/sync'] - Sync path (HTTP mode)
+ * @param {number} [options.retries=3] - Retry count
+ * @param {number} [options.baseDelayMs=250] - Base backoff delay
+ * @param {number} [options.maxDelayMs=2000] - Max backoff delay
+ * @param {number} [options.timeoutMs=10000] - Request timeout
+ * @param {AbortSignal} [options.signal] - Abort signal
+ * @param {(event: {type: string, attempt: number, durationMs?: number, status?: number, error?: Error}) => void} [options.onStatus]
+ * @param {boolean} [options.materialize=false] - Auto-materialize after sync
+ * @param {{ secret: string, keyId?: string }} [options.auth] - Client auth credentials
+ * @returns {Promise<{applied: number, attempts: number, state?: import('./JoinReducer.js').WarpStateV5}>}
+ */
+ async syncWith(remote, options = {}) {
+ const t0 = this._host._clock.now();
+ const {
+ path = '/sync',
+ retries = DEFAULT_SYNC_WITH_RETRIES,
+ baseDelayMs = DEFAULT_SYNC_WITH_BASE_DELAY_MS,
+ maxDelayMs = DEFAULT_SYNC_WITH_MAX_DELAY_MS,
+ timeoutMs = DEFAULT_SYNC_WITH_TIMEOUT_MS,
+ signal,
+ onStatus,
+ materialize: materializeAfterSync = false,
+ auth,
+ } = options;
+
+ const hasPathOverride = Object.prototype.hasOwnProperty.call(options, 'path');
+ const isDirectPeer = remote && typeof remote === 'object' &&
+ typeof remote.processSyncRequest === 'function';
+ let targetUrl = null;
+ if (!isDirectPeer) {
+ try {
+ targetUrl = remote instanceof URL ? new URL(remote.toString()) : new URL(/** @type {string} */ (remote));
+ } catch {
+ throw new SyncError('Invalid remote URL', {
+ code: 'E_SYNC_REMOTE_URL',
+ context: { remote },
+ });
+ }
+
+ if (!['http:', 'https:'].includes(targetUrl.protocol)) {
+ throw new SyncError('Unsupported remote URL protocol', {
+ code: 'E_SYNC_REMOTE_URL',
+ context: { protocol: targetUrl.protocol },
+ });
+ }
+
+ const normalizedPath = normalizeSyncPath(path);
+ if (!targetUrl.pathname || targetUrl.pathname === '/') {
+ targetUrl.pathname = normalizedPath;
+ } else if (hasPathOverride) {
+ targetUrl.pathname = normalizedPath;
+ }
+ targetUrl.hash = '';
+ }
+ let attempt = 0;
+ const emit = (/** @type {string} */ type, /** @type {Record<string, unknown>} */ payload = {}) => {
+ if (typeof onStatus === 'function') {
+ onStatus(/** @type {{type: string, attempt: number}} */ ({ type, attempt, ...payload }));
+ }
+ };
+ const shouldRetry = (/** @type {unknown} */ err) => {
+ if (isDirectPeer) { return false; }
+ if (err instanceof SyncError) {
+ return ['E_SYNC_REMOTE', 'E_SYNC_TIMEOUT', 'E_SYNC_NETWORK'].includes(err.code);
+ }
+ return err instanceof TimeoutError;
+ };
+ const executeAttempt = async () => {
+ checkAborted(signal, 'syncWith');
+ attempt += 1;
+ const attemptStart = this._host._clock.now();
+ emit('connecting');
+ const request = await this.createSyncRequest();
+ emit('requestBuilt');
+ let response;
+ if (isDirectPeer) {
+ emit('requestSent');
+ response = await remote.processSyncRequest(request);
+ emit('responseReceived');
+ } else {
+ emit('requestSent');
+ const bodyStr = JSON.stringify(request);
+ const authHeaders = await buildSyncAuthHeaders({
+ auth, bodyStr, targetUrl: /** @type {URL} */ (targetUrl), crypto: this._host._crypto,
+ });
+ let res;
+ try {
+ res = await timeout(timeoutMs, (timeoutSignal) => {
+ const combinedSignal = signal
+ ? AbortSignal.any([timeoutSignal, signal])
+ : timeoutSignal;
+ return fetch(/** @type {URL} */ (targetUrl).toString(), {
+ method: 'POST',
+ headers: {
+ 'content-type': 'application/json',
+ 'accept': 'application/json',
+ ...authHeaders,
+ },
+ body: bodyStr,
+ signal: combinedSignal,
+ });
+ });
+ } catch (err) {
+ if (isError(err) && err.name === 'AbortError') {
+ throw new OperationAbortedError('syncWith', { reason: 'Signal received' });
+ }
+ if (err instanceof TimeoutError) {
+ throw new SyncError('Sync request timed out', {
+ code: 'E_SYNC_TIMEOUT',
+ context: { timeoutMs },
+ });
+ }
+ throw new SyncError('Network error', {
+ code: 'E_SYNC_NETWORK',
+ context: { message: isError(err) ? err.message : String(err) },
+ });
+ }
+
+ emit('responseReceived', { status: res.status });
+
+ if (res.status >= 500) {
+ throw new SyncError(`Remote error: ${res.status}`, {
+ code: 'E_SYNC_REMOTE',
+ context: { status: res.status },
+ });
+ }
+
+ if (res.status >= 400) {
+ throw new SyncError(`Protocol error: ${res.status}`, {
+ code: 'E_SYNC_PROTOCOL',
+ context: { status: res.status },
+ });
+ }
+
+ try {
+ response = await res.json();
+ } catch {
+ throw new SyncError('Invalid JSON response', {
+ code: 'E_SYNC_PROTOCOL',
+ context: { status: res.status },
+ });
+ }
+ }
+
+ if (!response || typeof response !== 'object' ||
+ response.type !== 'sync-response' ||
+ !response.frontier || typeof response.frontier !== 'object' || Array.isArray(response.frontier) ||
+ !Array.isArray(response.patches)) {
+ throw new SyncError('Invalid sync response', {
+ code: 'E_SYNC_PROTOCOL',
+ });
+ }
+
+ if (!this._host._cachedState) {
+ await this._host.materialize();
+ emit('materialized');
+ }
+
+ const result = this.applySyncResponse(response);
+ emit('applied', { applied: result.applied });
+
+ const durationMs = this._host._clock.now() - attemptStart;
+ emit('complete', { durationMs, applied: result.applied });
+ return { applied: result.applied, attempts: attempt };
+ };
+
+ try {
+ const syncResult = await retry(executeAttempt, {
+ retries,
+ delay: baseDelayMs,
+ maxDelay: maxDelayMs,
+ backoff: 'exponential',
+ jitter: 'decorrelated',
+ signal,
+ shouldRetry,
+ onRetry: (/** @type {Error} */ error, /** @type {number} */ attemptNumber, /** @type {number} */ delayMs) => {
+ if (typeof onStatus === 'function') {
+ onStatus(/** @type {{type: string, attempt: number, delayMs: number, error: Error}} */ ({ type: 'retrying', attempt: attemptNumber, delayMs, error }));
+ }
+ },
+ });
+
+ this._host._logTiming('syncWith', t0, { metrics: `${syncResult.applied} patches applied` });
+
+ if (materializeAfterSync) {
+ if (!this._host._cachedState) { await this._host.materialize(); }
+ return { ...syncResult, state: /** @type {import('./JoinReducer.js').WarpStateV5} */ (this._host._cachedState) };
+ }
+ return syncResult;
+ } catch (err) {
+ this._host._logTiming('syncWith', t0, { error: /** @type {Error} */ (err) });
+ if (isError(err) && err.name === 'AbortError') {
+ const abortedError = new OperationAbortedError('syncWith', { reason: 'Signal received' });
+ if (typeof onStatus === 'function') {
+ onStatus({ type: 'failed', attempt, error: abortedError });
+ }
+ throw abortedError;
+ }
+ if (err instanceof RetryExhaustedError) {
+ const cause = /** @type {Error} */ (err.cause || err);
+ if (typeof onStatus === 'function') {
+ onStatus({ type: 'failed', attempt: err.attempts, error: cause });
+ }
+ throw cause;
+ }
+ if (typeof onStatus === 'function') {
+ onStatus({ type: 'failed', attempt, error: /** @type {Error} */ (err) });
+ }
+ throw err;
+ }
+ }
+
+ /**
+ * Starts a built-in sync server for this graph.
+ *
+ * @param {Object} options
+ * @param {number} options.port - Port to listen on
+ * @param {string} [options.host='127.0.0.1'] - Host to bind
+ * @param {string} [options.path='/sync'] - Path to handle sync requests
+ * @param {number} [options.maxRequestBytes=4194304] - Max request size in bytes
+ * @param {import('../../ports/HttpServerPort.js').default} options.httpPort - HTTP server adapter
+ * @param {{ keys: Record<string, string>, mode?: 'enforce'|'log-only' }} [options.auth] - Auth configuration
+ * @returns {Promise<{close: () => Promise<void>, url: string}>} Server handle
+ * @throws {Error} If port is not a number
+ * @throws {Error} If httpPort adapter is not provided
+ */
+ async serve({ port, host = '127.0.0.1', path = '/sync', maxRequestBytes = DEFAULT_SYNC_SERVER_MAX_BYTES, httpPort, auth } = /** @type {{ port: number, httpPort: import('../../ports/HttpServerPort.js').default }} */ ({})) {
+ if (typeof port !== 'number') {
+ throw new Error('serve() requires a numeric port');
+ }
+ if (!httpPort) {
+ throw new Error('serve() requires an httpPort adapter');
+ }
+
+ const authConfig = auth
+ ? { ...auth, crypto: this._host._crypto, logger: this._host._logger || undefined }
+ : undefined;
+
+ const httpServer = new HttpSyncServer({
+ httpPort,
+ graph: /** @type {{ processSyncRequest: Function }} */ (/** @type {unknown} */ (this._host)),
+ path,
+ host,
+ maxRequestBytes,
+ auth: authConfig,
+ });
+
+ return await httpServer.listen(port);
+ }
+}
diff --git a/src/domain/utils/validateShardOid.js b/src/domain/utils/validateShardOid.js
new file mode 100644
index 00000000..231207bc
--- /dev/null
+++ b/src/domain/utils/validateShardOid.js
@@ -0,0 +1,13 @@
+/**
+ * Validates a shard Object ID (hex string, 4-64 chars).
+ *
+ * The 4-character minimum accommodates abbreviated OIDs used in test
+ * fixtures and short internal IDs. Full Git SHA-1 OIDs are 40 chars;
+ * SHA-256 OIDs are 64 chars.
+ *
+ * @param {string} oid - The OID to validate
+ * @returns {boolean} True if oid is a valid hex string of 4-64 characters
+ */
+export function isValidShardOid(oid) {
+ return typeof oid === 'string' && /^[0-9a-fA-F]{4,64}$/.test(oid);
+}
diff --git a/src/domain/warp/_internal.js b/src/domain/warp/_internal.js
index b78c641a..6a7d65cd 100644
--- a/src/domain/warp/_internal.js
+++ b/src/domain/warp/_internal.js
@@ -10,17 +10,8 @@
// ── Error constructors ──────────────────────────────────────────────────────
export { default as QueryError } from '../errors/QueryError.js';
export { default as ForkError } from '../errors/ForkError.js';
-export { default as SyncError } from '../errors/SyncError.js';
-export { default as OperationAbortedError } from '../errors/OperationAbortedError.js';
// ── Shared constants ────────────────────────────────────────────────────────
export const DEFAULT_ADJACENCY_CACHE_SIZE = 3;
export const E_NO_STATE_MSG = 'No materialized state. Call materialize() before querying, or use autoMaterialize: true (the default). See https://github.com/git-stunts/git-warp#materialization';
export const E_STALE_STATE_MSG = 'State is stale (patches written since last materialize). Call materialize() to refresh. See https://github.com/git-stunts/git-warp#materialization';
-
-// ── Sync constants ──────────────────────────────────────────────────────────
-export const DEFAULT_SYNC_SERVER_MAX_BYTES = 4 * 1024 * 1024;
-export const DEFAULT_SYNC_WITH_RETRIES = 3;
-export const DEFAULT_SYNC_WITH_BASE_DELAY_MS = 250;
-export const DEFAULT_SYNC_WITH_MAX_DELAY_MS = 2000;
-export const DEFAULT_SYNC_WITH_TIMEOUT_MS = 10_000;
diff --git a/src/domain/warp/_wiredMethods.d.ts b/src/domain/warp/_wiredMethods.d.ts
index 5560c179..9e161647 100644
--- a/src/domain/warp/_wiredMethods.d.ts
+++ b/src/domain/warp/_wiredMethods.d.ts
@@ -193,7 +193,7 @@ declare module '../WarpGraph.js' {
_relationToCheckpointHead(ckHead: string, incomingSha: string): Promise;
_validatePatchAgainstCheckpoint(writerId: string, incomingSha: string, checkpoint: unknown): Promise;
- // ── sync.methods.js ───────────────────────────────────────────────────
+ // ── SyncController (direct delegation) ─────────────────────────────────
getFrontier(): Promise>;
hasFrontierChanged(): Promise;
status(): Promise;
diff --git a/src/domain/warp/sync.methods.js b/src/domain/warp/sync.methods.js
deleted file mode 100644
index 382d000d..00000000
--- a/src/domain/warp/sync.methods.js
+++ /dev/null
@@ -1,554 +0,0 @@
-/**
- * Sync methods for WarpGraph — frontier, status, sync protocol, and HTTP serve.
- *
- * Every function uses `this` bound to a WarpGraph instance at runtime
- * via wireWarpMethods().
- *
- * @module domain/warp/sync.methods
- */
-
-import {
- SyncError,
- OperationAbortedError,
- QueryError,
- E_NO_STATE_MSG,
- DEFAULT_SYNC_SERVER_MAX_BYTES,
- DEFAULT_SYNC_WITH_RETRIES,
- DEFAULT_SYNC_WITH_BASE_DELAY_MS,
- DEFAULT_SYNC_WITH_MAX_DELAY_MS,
- DEFAULT_SYNC_WITH_TIMEOUT_MS,
-} from './_internal.js';
-import {
- createSyncRequest as createSyncRequestImpl,
- processSyncRequest as processSyncRequestImpl,
- applySyncResponse as applySyncResponseImpl,
- syncNeeded as syncNeededImpl,
-} from '../services/SyncProtocol.js';
-import { retry, timeout, RetryExhaustedError, TimeoutError } from '@git-stunts/alfred';
-import { checkAborted } from '../utils/cancellation.js';
-import { createFrontier, updateFrontier } from '../services/Frontier.js';
-import { buildWriterRef } from '../utils/RefLayout.js';
-import { collectGCMetrics } from '../services/GCMetrics.js';
-import HttpSyncServer from '../services/HttpSyncServer.js';
-import { signSyncRequest, canonicalizePath } from '../services/SyncAuthService.js';
-import { isError } from '../types/WarpErrors.js';
-
-/** @typedef {import('../types/WarpPersistence.js').CorePersistence} CorePersistence */
-
-// ── Private helpers ─────────────────────────────────────────────────────────
-
-/**
- * Normalizes a sync endpoint path to ensure it starts with '/'.
- * Returns '/sync' if no path is provided.
- *
- * @param {string|undefined|null} path - The sync path to normalize
- * @returns {string} Normalized path starting with '/'
- * @private
- */
-function normalizeSyncPath(path) {
- if (!path) {
- return '/sync';
- }
- return path.startsWith('/') ? path : `/${path}`;
-}
-
-/**
- * Builds auth headers for an outgoing sync request if auth is configured.
- *
- * @param {Object} params
- * @param {{ secret: string, keyId?: string }|undefined} params.auth
- * @param {string} params.bodyStr - Serialized request body
- * @param {URL} params.targetUrl
- * @param {import('../../ports/CryptoPort.js').default} params.crypto
- * @returns {Promise>}
- * @private
- */
-async function buildSyncAuthHeaders({ auth, bodyStr, targetUrl, crypto }) {
- if (!auth || !auth.secret) {
- return {};
- }
- const bodyBuf = new TextEncoder().encode(bodyStr);
- return await signSyncRequest(
- {
- method: 'POST',
- path: canonicalizePath(targetUrl.pathname + (targetUrl.search || '')),
- contentType: 'application/json',
- body: bodyBuf,
- secret: auth.secret,
- keyId: auth.keyId || 'default',
- },
- { crypto },
- );
-}
-
-// ── Exported methods ────────────────────────────────────────────────────────
-
-/**
- * Returns the current frontier — a Map of writerId → tip SHA.
- *
- * @this {import('../WarpGraph.js').default}
- * @returns {Promise>} Frontier map
- * @throws {Error} If listing refs fails
- */
-export async function getFrontier() {
- const writerIds = await this.discoverWriters();
- const frontier = createFrontier();
-
- for (const writerId of writerIds) {
- const writerRef = buildWriterRef(this._graphName, writerId);
- const tipSha = await this._persistence.readRef(writerRef);
- if (tipSha) {
- updateFrontier(frontier, writerId, tipSha);
- }
- }
-
- return frontier;
-}
-
-/**
- * Checks whether any writer tip has changed since the last materialize.
- *
- * O(writers) comparison of stored writer tip SHAs against current refs.
- * Cheap "has anything changed?" check without materialization.
- *
- * @this {import('../WarpGraph.js').default}
- * @returns {Promise} True if frontier has changed (or never materialized)
- * @throws {Error} If listing refs fails
- */
-export async function hasFrontierChanged() {
- if (this._lastFrontier === null) {
- return true;
- }
-
- const current = await this.getFrontier();
-
- if (current.size !== this._lastFrontier.size) {
- return true;
- }
-
- for (const [writerId, tipSha] of current) {
- if (this._lastFrontier.get(writerId) !== tipSha) {
- return true;
- }
- }
-
- return false;
-}
-
-/**
- * Returns a lightweight status snapshot of the graph's operational state.
- *
- * This method is O(writers) and does NOT trigger materialization.
- *
- * @this {import('../WarpGraph.js').default}
- * @returns {Promise<{
- * cachedState: 'fresh' | 'stale' | 'none',
- * patchesSinceCheckpoint: number,
- * tombstoneRatio: number,
- * writers: number,
- * frontier: Record,
- * }>} The graph status
- * @throws {Error} If listing refs fails
- */
-export async function status() {
- // Fetch frontier once, reuse for both staleness check and return value
- const frontier = await this.getFrontier();
-
- // Determine cachedState
- /** @type {'fresh' | 'stale' | 'none'} */
- let cachedState;
- if (this._cachedState === null) {
- cachedState = 'none';
- } else if (this._stateDirty || !this._lastFrontier ||
- frontier.size !== this._lastFrontier.size ||
- ![...frontier].every(([w, sha]) => /** @type {Map} */ (this._lastFrontier).get(w) === sha)) {
- cachedState = 'stale';
- } else {
- cachedState = 'fresh';
- }
-
- // patchesSinceCheckpoint
- const patchesSinceCheckpoint = this._patchesSinceCheckpoint;
-
- // tombstoneRatio
- let tombstoneRatio = 0;
- if (this._cachedState) {
- const metrics = collectGCMetrics(this._cachedState);
- tombstoneRatio = metrics.tombstoneRatio;
- }
-
- // writers
- const writers = frontier.size;
-
- // Convert frontier Map to plain object
- const frontierObj = Object.fromEntries(frontier);
-
- return {
- cachedState,
- patchesSinceCheckpoint,
- tombstoneRatio,
- writers,
- frontier: frontierObj,
- };
-}
-
-/**
- * Creates a sync request to send to a remote peer.
- * The request contains the local frontier for comparison.
- *
- * @this {import('../WarpGraph.js').default}
- * @returns {Promise} The sync request
- * @throws {Error} If listing refs fails
- *
- * @example
- * const request = await graph.createSyncRequest();
- * // Send request to remote peer...
- */
-export async function createSyncRequest() {
- const frontier = await this.getFrontier();
- return createSyncRequestImpl(frontier);
-}
-
-/**
- * Processes an incoming sync request and returns patches the requester needs.
- *
- * @this {import('../WarpGraph.js').default}
- * @param {import('../services/SyncProtocol.js').SyncRequest} request - The incoming sync request
- * @returns {Promise} The sync response
- * @throws {Error} If listing refs or reading patches fails
- *
- * @example
- * // Receive request from remote peer
- * const response = await graph.processSyncRequest(request);
- * // Send response back to requester...
- */
-export async function processSyncRequest(request) {
- const localFrontier = await this.getFrontier();
- /** @type {CorePersistence} */
- const persistence = this._persistence;
- return await processSyncRequestImpl(
- request,
- localFrontier,
- persistence,
- this._graphName,
- { codec: this._codec }
- );
-}
-
-/**
- * Applies a sync response to the local graph state.
- * Updates the cached state with received patches.
- *
- * **Requires a cached state.**
- *
- * @this {import('../WarpGraph.js').default}
- * @param {import('../services/SyncProtocol.js').SyncResponse} response - The sync response
- * @returns {{state: import('../services/JoinReducer.js').WarpStateV5, applied: number}} Result with updated state
- * @throws {import('../errors/QueryError.js').default} If no cached state exists (code: `E_NO_STATE`)
- *
- * @example
- * await graph.materialize(); // Cache state first
- * const result = graph.applySyncResponse(response);
- * console.log(`Applied ${result.applied} patches from remote`);
- */
-export function applySyncResponse(response) {
- if (!this._cachedState) {
- throw new QueryError(E_NO_STATE_MSG, {
- code: 'E_NO_STATE',
- });
- }
-
- const currentFrontier = /** @type {Map} */ (/** @type {unknown} */ (this._cachedState.observedFrontier));
- const result = /** @type {{state: import('../services/JoinReducer.js').WarpStateV5, frontier: Map, applied: number}} */ (applySyncResponseImpl(response, this._cachedState, currentFrontier));
-
- // Update cached state
- this._cachedState = result.state;
-
- // Keep _lastFrontier in sync so hasFrontierChanged() won't misreport stale.
- // Merge the response's per-writer tips into the stored frontier snapshot.
- if (this._lastFrontier && Array.isArray(response.patches)) {
- for (const { writerId, sha } of response.patches) {
- if (writerId && sha) {
- this._lastFrontier.set(writerId, sha);
- }
- }
- }
-
- // Track patches for GC
- this._patchesSinceGC += result.applied;
-
- // State is now in sync with the frontier — clear dirty flag
- this._stateDirty = false;
-
- return result;
-}
-
-/**
- * Checks if sync is needed with a remote frontier.
- *
- * @this {import('../WarpGraph.js').default}
- * @param {Map} remoteFrontier - The remote peer's frontier
- * @returns {Promise} True if sync would transfer any patches
- * @throws {Error} If listing refs fails
- */
-export async function syncNeeded(remoteFrontier) {
- const localFrontier = await this.getFrontier();
- return syncNeededImpl(localFrontier, remoteFrontier);
-}
-
-/**
- * Syncs with a remote peer (HTTP or direct graph instance).
- *
- * @this {import('../WarpGraph.js').default}
- * @param {string|import('../WarpGraph.js').default} remote - URL or peer graph instance
- * @param {Object} [options]
- * @param {string} [options.path='/sync'] - Sync path (HTTP mode)
- * @param {number} [options.retries=3] - Retry count
- * @param {number} [options.baseDelayMs=250] - Base backoff delay
- * @param {number} [options.maxDelayMs=2000] - Max backoff delay
- * @param {number} [options.timeoutMs=10000] - Request timeout
- * @param {AbortSignal} [options.signal] - Abort signal
- * @param {(event: {type: string, attempt: number, durationMs?: number, status?: number, error?: Error}) => void} [options.onStatus]
- * @param {boolean} [options.materialize=false] - Auto-materialize after sync
- * @param {{ secret: string, keyId?: string }} [options.auth] - Client auth credentials
- * @returns {Promise<{applied: number, attempts: number, state?: import('../services/JoinReducer.js').WarpStateV5}>}
- */
-export async function syncWith(remote, options = {}) {
- const t0 = this._clock.now();
- const {
- path = '/sync',
- retries = DEFAULT_SYNC_WITH_RETRIES,
- baseDelayMs = DEFAULT_SYNC_WITH_BASE_DELAY_MS,
- maxDelayMs = DEFAULT_SYNC_WITH_MAX_DELAY_MS,
- timeoutMs = DEFAULT_SYNC_WITH_TIMEOUT_MS,
- signal,
- onStatus,
- materialize: materializeAfterSync = false,
- auth,
- } = options;
-
- const hasPathOverride = Object.prototype.hasOwnProperty.call(options, 'path');
- const isDirectPeer = remote && typeof remote === 'object' &&
- typeof remote.processSyncRequest === 'function';
- let targetUrl = null;
- if (!isDirectPeer) {
- try {
- targetUrl = remote instanceof URL ? new URL(remote.toString()) : new URL(/** @type {string} */ (remote));
- } catch {
- throw new SyncError('Invalid remote URL', {
- code: 'E_SYNC_REMOTE_URL',
- context: { remote },
- });
- }
-
- if (!['http:', 'https:'].includes(targetUrl.protocol)) {
- throw new SyncError('Unsupported remote URL protocol', {
- code: 'E_SYNC_REMOTE_URL',
- context: { protocol: targetUrl.protocol },
- });
- }
-
- const normalizedPath = normalizeSyncPath(path);
- if (!targetUrl.pathname || targetUrl.pathname === '/') {
- targetUrl.pathname = normalizedPath;
- } else if (hasPathOverride) {
- targetUrl.pathname = normalizedPath;
- }
- targetUrl.hash = '';
- }
- let attempt = 0;
- const emit = (/** @type {string} */ type, /** @type {Record} */ payload = {}) => {
- if (typeof onStatus === 'function') {
- onStatus(/** @type {{type: string, attempt: number}} */ ({ type, attempt, ...payload }));
- }
- };
- const shouldRetry = (/** @type {unknown} */ err) => {
- if (isDirectPeer) { return false; }
- if (err instanceof SyncError) {
- return ['E_SYNC_REMOTE', 'E_SYNC_TIMEOUT', 'E_SYNC_NETWORK'].includes(err.code);
- }
- return err instanceof TimeoutError;
- };
- const executeAttempt = async () => {
- checkAborted(signal, 'syncWith');
- attempt += 1;
- const attemptStart = this._clock.now();
- emit('connecting');
- const request = await this.createSyncRequest();
- emit('requestBuilt');
- let response;
- if (isDirectPeer) {
- emit('requestSent');
- response = await remote.processSyncRequest(request);
- emit('responseReceived');
- } else {
- emit('requestSent');
- const bodyStr = JSON.stringify(request);
- const authHeaders = await buildSyncAuthHeaders({
- auth, bodyStr, targetUrl: /** @type {URL} */ (targetUrl), crypto: this._crypto,
- });
- let res;
- try {
- res = await timeout(timeoutMs, (timeoutSignal) => {
- const combinedSignal = signal
- ? AbortSignal.any([timeoutSignal, signal])
- : timeoutSignal;
- return fetch(/** @type {URL} */ (targetUrl).toString(), {
- method: 'POST',
- headers: {
- 'content-type': 'application/json',
- 'accept': 'application/json',
- ...authHeaders,
- },
- body: bodyStr,
- signal: combinedSignal,
- });
- });
- } catch (err) {
- if (isError(err) && err.name === 'AbortError') {
- throw new OperationAbortedError('syncWith', { reason: 'Signal received' });
- }
- if (err instanceof TimeoutError) {
- throw new SyncError('Sync request timed out', {
- code: 'E_SYNC_TIMEOUT',
- context: { timeoutMs },
- });
- }
- throw new SyncError('Network error', {
- code: 'E_SYNC_NETWORK',
- context: { message: isError(err) ? err.message : String(err) },
- });
- }
-
- emit('responseReceived', { status: res.status });
-
- if (res.status >= 500) {
- throw new SyncError(`Remote error: ${res.status}`, {
- code: 'E_SYNC_REMOTE',
- context: { status: res.status },
- });
- }
-
- if (res.status >= 400) {
- throw new SyncError(`Protocol error: ${res.status}`, {
- code: 'E_SYNC_PROTOCOL',
- context: { status: res.status },
- });
- }
-
- try {
- response = await res.json();
- } catch {
- throw new SyncError('Invalid JSON response', {
- code: 'E_SYNC_PROTOCOL',
- context: { status: res.status },
- });
- }
- }
-
- if (!this._cachedState) {
- await this.materialize();
- emit('materialized');
- }
-
- if (!response || typeof response !== 'object' ||
- response.type !== 'sync-response' ||
- !response.frontier || typeof response.frontier !== 'object' || Array.isArray(response.frontier) ||
- !Array.isArray(response.patches)) {
- throw new SyncError('Invalid sync response', {
- code: 'E_SYNC_PROTOCOL',
- });
- }
-
- const result = this.applySyncResponse(response);
- emit('applied', { applied: result.applied });
-
- const durationMs = this._clock.now() - attemptStart;
- emit('complete', { durationMs, applied: result.applied });
- return { applied: result.applied, attempts: attempt };
- };
-
- try {
- const syncResult = await retry(executeAttempt, {
- retries,
- delay: baseDelayMs,
- maxDelay: maxDelayMs,
- backoff: 'exponential',
- jitter: 'decorrelated',
- signal,
- shouldRetry,
- onRetry: (/** @type {Error} */ error, /** @type {number} */ attemptNumber, /** @type {number} */ delayMs) => {
- if (typeof onStatus === 'function') {
- onStatus(/** @type {{type: string, attempt: number, delayMs: number, error: Error}} */ ({ type: 'retrying', attempt: attemptNumber, delayMs, error }));
- }
- },
- });
-
- this._logTiming('syncWith', t0, { metrics: `${syncResult.applied} patches applied` });
-
- if (materializeAfterSync) {
- if (!this._cachedState) { await this.materialize(); }
- return { ...syncResult, state: /** @type {import('../services/JoinReducer.js').WarpStateV5} */ (this._cachedState) };
- }
- return syncResult;
- } catch (err) {
- this._logTiming('syncWith', t0, { error: /** @type {Error} */ (err) });
- if (isError(err) && err.name === 'AbortError') {
- const abortedError = new OperationAbortedError('syncWith', { reason: 'Signal received' });
- if (typeof onStatus === 'function') {
- onStatus({ type: 'failed', attempt, error: abortedError });
- }
- throw abortedError;
- }
- if (err instanceof RetryExhaustedError) {
- const cause = /** @type {Error} */ (err.cause || err);
- if (typeof onStatus === 'function') {
- onStatus({ type: 'failed', attempt: err.attempts, error: cause });
- }
- throw cause;
- }
- if (typeof onStatus === 'function') {
- onStatus({ type: 'failed', attempt, error: /** @type {Error} */ (err) });
- }
- throw err;
- }
-}
-
-/**
- * Starts a built-in sync server for this graph.
- *
- * @this {import('../WarpGraph.js').default}
- * @param {Object} options
- * @param {number} options.port - Port to listen on
- * @param {string} [options.host='127.0.0.1'] - Host to bind
- * @param {string} [options.path='/sync'] - Path to handle sync requests
- * @param {number} [options.maxRequestBytes=4194304] - Max request size in bytes
- * @param {import('../../ports/HttpServerPort.js').default} options.httpPort - HTTP server adapter
- * @param {{ keys: Record, mode?: 'enforce'|'log-only' }} [options.auth] - Auth configuration
- * @returns {Promise<{close: () => Promise, url: string}>} Server handle
- * @throws {Error} If port is not a number
- * @throws {Error} If httpPort adapter is not provided
- */
-export async function serve({ port, host = '127.0.0.1', path = '/sync', maxRequestBytes = DEFAULT_SYNC_SERVER_MAX_BYTES, httpPort, auth } = /** @type {{ port: number, httpPort: import('../../ports/HttpServerPort.js').default }} */ ({})) {
- if (typeof port !== 'number') {
- throw new Error('serve() requires a numeric port');
- }
- if (!httpPort) {
- throw new Error('serve() requires an httpPort adapter');
- }
-
- const authConfig = auth
- ? { ...auth, crypto: this._crypto, logger: this._logger || undefined }
- : undefined;
-
- const httpServer = new HttpSyncServer({
- httpPort,
- graph: this,
- path,
- host,
- maxRequestBytes,
- auth: authConfig,
- });
-
- return await httpServer.listen(port);
-}
diff --git a/test/unit/domain/WarpGraph.syncAuth.test.js b/test/unit/domain/WarpGraph.syncAuth.test.js
index 9b082f13..e97598a8 100644
--- a/test/unit/domain/WarpGraph.syncAuth.test.js
+++ b/test/unit/domain/WarpGraph.syncAuth.test.js
@@ -13,16 +13,26 @@ async function createGraph(writerId = 'writer-1') {
return WarpGraph.open({ persistence: mockPersistence, graphName: 'test', writerId });
}
+/**
+ * Mocks on _syncController — syncWith calls createSyncRequest/applySyncResponse
+ * as this.method() inside SyncController, so instance-level mocks won't intercept.
+ */
function mockClientGraph(/** @type {WarpGraph} */ graph) {
const g = /** @type {Record} */ (/** @type {unknown} */ (graph));
g._cachedState = {};
- g.applySyncResponse = vi.fn().mockReturnValue({ applied: 0 });
- g.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
+ const sc = /** @type {Record<string, any>} */ (g._syncController);
+ sc.applySyncResponse = vi.fn().mockReturnValue({ applied: 0 });
+ sc.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
}
+/**
+ * Mocks on _syncController — processSyncRequest is called by HttpSyncServer
+ * via the host reference, which delegates to the controller.
+ */
function mockServerGraph(/** @type {WarpGraph} */ graph) {
const g = /** @type {Record} */ (/** @type {unknown} */ (graph));
- g.processSyncRequest = vi.fn().mockResolvedValue({
+ const sc = /** @type {Record<string, any>} */ (g._syncController);
+ sc.processSyncRequest = vi.fn().mockResolvedValue({
type: 'sync-response',
frontier: {},
patches: [],
@@ -51,8 +61,8 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
});
expect(result.applied).toBe(0);
- expect(clientGraph.applySyncResponse).toHaveBeenCalled();
- expect(serverGraph.processSyncRequest).toHaveBeenCalled();
+ expect(/** @type {any} */ (clientGraph)._syncController.applySyncResponse).toHaveBeenCalled();
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).toHaveBeenCalled();
} finally {
await handle.close();
}
@@ -76,7 +86,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
clientGraph.syncWith(handle.url, { timeoutMs: 5000 }),
).rejects.toMatchObject({ code: 'E_SYNC_PROTOCOL' });
- expect(serverGraph.processSyncRequest).not.toHaveBeenCalled();
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).not.toHaveBeenCalled();
} finally {
await handle.close();
}
@@ -103,7 +113,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
}),
).rejects.toMatchObject({ code: 'E_SYNC_PROTOCOL' });
- expect(serverGraph.processSyncRequest).not.toHaveBeenCalled();
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).not.toHaveBeenCalled();
} finally {
await handle.close();
}
@@ -130,7 +140,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
}),
).rejects.toMatchObject({ code: 'E_SYNC_PROTOCOL' });
- expect(serverGraph.processSyncRequest).not.toHaveBeenCalled();
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).not.toHaveBeenCalled();
} finally {
await handle.close();
}
@@ -153,7 +163,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
const result = await clientGraph.syncWith(handle.url, { timeoutMs: 5000 });
expect(result.applied).toBe(0);
- expect(serverGraph.processSyncRequest).toHaveBeenCalled();
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).toHaveBeenCalled();
} finally {
await handle.close();
}
@@ -178,7 +188,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
});
expect(result.applied).toBe(0);
- expect(serverGraph.processSyncRequest).toHaveBeenCalled();
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).toHaveBeenCalled();
} finally {
await handle.close();
}
@@ -211,7 +221,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
});
expect(result2.applied).toBe(0);
- expect(serverGraph.processSyncRequest).toHaveBeenCalledTimes(2);
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).toHaveBeenCalledTimes(2);
} finally {
await handle.close();
}
@@ -249,7 +259,7 @@ describe('WarpGraph syncAuth (real HTTP)', () => {
});
expect(resultB.applied).toBe(0);
- expect(serverGraph.processSyncRequest).toHaveBeenCalledTimes(2);
+ expect(/** @type {*} */ (serverGraph)._syncController.processSyncRequest).toHaveBeenCalledTimes(2);
} finally {
await handle.close();
}
diff --git a/test/unit/domain/WarpGraph.syncWith.test.js b/test/unit/domain/WarpGraph.syncWith.test.js
index 5cf01d66..ab70cfea 100644
--- a/test/unit/domain/WarpGraph.syncWith.test.js
+++ b/test/unit/domain/WarpGraph.syncWith.test.js
@@ -25,8 +25,8 @@ describe('WarpGraph syncWith', () => {
beforeEach(async () => {
graph = await createGraph();
/** @type {any} */ (graph)._cachedState = {};
- graph.applySyncResponse = vi.fn().mockReturnValue({ applied: 0 });
- graph.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
+ vi.spyOn(/** @type {any} */ (graph)._syncController, 'applySyncResponse').mockReturnValue({ applied: 0 });
+ vi.spyOn(/** @type {any} */ (graph)._syncController, 'createSyncRequest').mockResolvedValue({ type: 'sync-request', frontier: {} });
});
it('syncs over HTTP with default /sync path', async () => {
@@ -46,7 +46,7 @@ describe('WarpGraph syncWith', () => {
try {
const result = await graph.syncWith(`http://127.0.0.1:${port}`);
expect(result.applied).toBe(0);
- expect(graph.applySyncResponse).toHaveBeenCalledWith(responsePayload);
+ expect(/** @type {any} */ (graph)._syncController.applySyncResponse).toHaveBeenCalledWith(responsePayload);
} finally {
await new Promise((resolve) => server.close(resolve));
}
diff --git a/test/unit/domain/WarpGraph.timing.test.js b/test/unit/domain/WarpGraph.timing.test.js
index 638e9430..1f81ac88 100644
--- a/test/unit/domain/WarpGraph.timing.test.js
+++ b/test/unit/domain/WarpGraph.timing.test.js
@@ -249,8 +249,8 @@ describe('WarpGraph operation timing (LH/TIMING/1)', () => {
// Pre-cache state so sync doesn't need to materialize
/** @type {any} */ (graph)._cachedState = createEmptyStateV5();
- graph.applySyncResponse = vi.fn().mockReturnValue({ applied: 5 });
- graph.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
+ /** @type {any} */ (graph)._syncController.applySyncResponse = vi.fn().mockReturnValue({ applied: 5 });
+ /** @type {any} */ (graph)._syncController.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
const responsePayload = { type: 'sync-response', frontier: {}, patches: [] };
const peer = { processSyncRequest: vi.fn().mockResolvedValue(responsePayload) };
@@ -273,8 +273,8 @@ describe('WarpGraph operation timing (LH/TIMING/1)', () => {
});
/** @type {any} */ (graph)._cachedState = createEmptyStateV5();
- graph.applySyncResponse = vi.fn().mockReturnValue({ applied: 0 });
- graph.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
+ /** @type {any} */ (graph)._syncController.applySyncResponse = vi.fn().mockReturnValue({ applied: 0 });
+ /** @type {any} */ (graph)._syncController.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
const responsePayload = { type: 'sync-response', frontier: {}, patches: [] };
const peer = { processSyncRequest: vi.fn().mockResolvedValue(responsePayload) };
@@ -295,7 +295,7 @@ describe('WarpGraph operation timing (LH/TIMING/1)', () => {
});
/** @type {any} */ (graph)._cachedState = createEmptyStateV5();
- graph.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
+ /** @type {any} */ (graph)._syncController.createSyncRequest = vi.fn().mockResolvedValue({ type: 'sync-request', frontier: {} });
const peer = {
processSyncRequest: vi.fn().mockRejectedValue(new Error('peer unreachable')),
diff --git a/test/unit/domain/services/BitmapIndexReader.test.js b/test/unit/domain/services/BitmapIndexReader.test.js
index cfb6ea29..98d2682c 100644
--- a/test/unit/domain/services/BitmapIndexReader.test.js
+++ b/test/unit/domain/services/BitmapIndexReader.test.js
@@ -42,11 +42,9 @@ const createV2Shard = (/** @type {any} */ data) => ({
});
describe('BitmapIndexReader', () => {
- /** @type {any} */
/** @type {any} */
let mockStorage;
/** @type {any} */
- /** @type {any} */
let reader;
beforeEach(() => {
@@ -76,9 +74,67 @@ describe('BitmapIndexReader', () => {
});
});
+ describe('OID validation in setup()', () => {
+ it('accepts valid hex OIDs', () => {
+ const validOids = {
+ 'meta_ab.json': 'a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2',
+ 'shards_fwd_ab.json': 'f6e5d4c3b2a1f6e5d4c3b2a1f6e5d4c3b2a1f6e5',
+ };
+ reader.setup(validOids);
+ expect(reader.shardOids.size).toBe(2);
+ });
+
+ it('skips invalid OIDs in non-strict mode with warning', () => {
+ const warnSpy = vi.fn();
+ const lenientReader = new BitmapIndexReader(/** @type {any} */ ({
+ storage: mockStorage,
+ strict: false,
+ logger: { warn: warnSpy, info: vi.fn(), error: vi.fn(), debug: vi.fn() },
+ }));
+ lenientReader.setup({
+ 'meta_ab.json': 'a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2',
+ 'meta_cd.json': 'not-a-valid-oid!!!',
+ });
+ expect(lenientReader.shardOids.size).toBe(1);
+ expect(lenientReader.shardOids.has('meta_ab.json')).toBe(true);
+ expect(lenientReader.shardOids.has('meta_cd.json')).toBe(false);
+ expect(warnSpy).toHaveBeenCalledWith('Skipping shard with invalid OID', expect.objectContaining({
+ shardPath: 'meta_cd.json',
+ reason: 'invalid_oid',
+ }));
+ });
+
+ it('throws ShardCorruptionError for invalid OIDs in strict mode', () => {
+ const strictReader = new BitmapIndexReader(/** @type {any} */ ({
+ storage: mockStorage,
+ strict: true,
+ }));
+ expect(() => strictReader.setup({
+ 'meta_ab.json': 'not-valid-oid',
+ })).toThrow(ShardCorruptionError);
+ });
+
+ it('includes shard path and OID in strict mode error', () => {
+ const strictReader = new BitmapIndexReader(/** @type {any} */ ({
+ storage: mockStorage,
+ strict: true,
+ }));
+ try {
+ strictReader.setup({ 'meta_ab.json': 'bad!' });
+ expect.fail('should have thrown');
+ } catch (err) {
+ const e = /** @type {import('../../../../src/domain/errors/ShardCorruptionError.js').default} */ (err);
+ expect(e).toBeInstanceOf(ShardCorruptionError);
+ expect(e.shardPath).toBe('meta_ab.json');
+ expect(e.oid).toBe('bad!');
+ expect(e.reason).toBe('invalid_oid');
+ }
+ });
+ });
+
describe('setup', () => {
it('stores shard OIDs for lazy loading', () => {
- reader.setup({ 'meta_aa.json': 'oid1', 'shards_fwd_aa.json': 'oid2' });
+ reader.setup({ 'meta_aa.json': 'a1b2c3d400000000000000000000000000000000', 'shards_fwd_aa.json': 'e5f6a7b800000000000000000000000000000000' });
expect(reader.shardOids.size).toBe(2);
});
@@ -103,24 +159,24 @@ describe('BitmapIndexReader', () => {
it('loads and decodes bitmap data', async () => {
// Build a real index
const builder = new BitmapIndexBuilder();
- builder.addEdge('aabbccdd', 'eeffgghh');
+ builder.addEdge('aabbccdd00000000000000000000000000000000', 'eeff00dd00000000000000000000000000000000');
const tree = await builder.serialize();
// Mock storage to return serialized data
mockStorage.readBlob.mockImplementation(async (/** @type {any} */ oid) => {
- if (oid === 'meta-oid') return tree['meta_aa.json'] || tree['meta_ee.json'];
- if (oid === 'rev-oid') return tree['shards_rev_ee.json'];
+ if (oid === 'aaa1bbb200000000000000000000000000000000') return tree['meta_aa.json'] || tree['meta_ee.json'];
+ if (oid === 'bbb2ccc300000000000000000000000000000000') return tree['shards_rev_ee.json'];
return Buffer.from('{}');
});
reader.setup({
- 'meta_aa.json': 'meta-oid',
- 'meta_ee.json': 'meta-oid',
- 'shards_rev_ee.json': 'rev-oid',
+ 'meta_aa.json': 'aaa1bbb200000000000000000000000000000000',
+ 'meta_ee.json': 'aaa1bbb200000000000000000000000000000000',
+ 'shards_rev_ee.json': 'bbb2ccc300000000000000000000000000000000',
});
- const parents = await reader.getParents('eeffgghh');
- expect(parents).toContain('aabbccdd');
+ const parents = await reader.getParents('eeff00dd00000000000000000000000000000000');
+ expect(parents).toContain('aabbccdd00000000000000000000000000000000');
});
});
@@ -137,41 +193,43 @@ describe('BitmapIndexReader', () => {
mockStorage.readBlob.mockRejectedValue(new Error('object not found'));
reader.setup({
- 'meta_ab.json': 'nonexistent-oid',
- 'shards_rev_ab.json': 'also-nonexistent'
+ 'meta_ab.json': 'ccc3ddd400000000000000000000000000000000',
+ 'shards_rev_ab.json': 'ddd4eee500000000000000000000000000000000'
});
- await expect(reader.getParents('abcd1234')).rejects.toThrow(ShardLoadError);
+ await expect(reader.getParents('abcd123400000000000000000000000000000000')).rejects.toThrow(ShardLoadError);
});
- it('returns empty array when shard contains invalid JSON', async () => {
+ it('returns empty array when shard contains invalid JSON (non-strict)', async () => {
+ const lenient = new BitmapIndexReader(/** @type {any} */ ({ storage: mockStorage, strict: false }));
mockStorage.readBlob.mockResolvedValue(Buffer.from('not valid json {{{'));
- reader.setup({
- 'meta_ab.json': 'corrupt-oid',
- 'shards_rev_ab.json': 'corrupt-oid'
+ lenient.setup({
+ 'meta_ab.json': 'eee5fff600000000000000000000000000000000',
+ 'shards_rev_ab.json': 'eee5fff600000000000000000000000000000000'
});
- const parents = await reader.getParents('abcd1234');
+ const parents = await lenient.getParents('abcd123400000000000000000000000000000000');
expect(parents).toEqual([]);
});
- it('returns empty array when shard contains wrong data type', async () => {
+ it('returns empty array when shard contains wrong data type (non-strict)', async () => {
+ const lenient = new BitmapIndexReader(/** @type {any} */ ({ storage: mockStorage, strict: false }));
// Valid JSON but wrong structure (array instead of object)
mockStorage.readBlob.mockResolvedValue(Buffer.from('[1,2,3]'));
- reader.setup({
- 'shards_rev_ab.json': 'wrong-type-oid'
+ lenient.setup({
+ 'shards_rev_ab.json': 'fff6aaa100000000000000000000000000000000'
});
- const parents = await reader.getParents('abcd1234');
+ const parents = await lenient.getParents('abcd123400000000000000000000000000000000');
expect(parents).toEqual([]);
});
it('throws ShardLoadError on storage failure but continues after', async () => {
// Build a real index for comparison
const builder = new BitmapIndexBuilder();
- builder.addEdge('aabbccdd', 'eeffgghh');
+ builder.addEdge('aabbccdd00000000000000000000000000000000', 'eeff00dd00000000000000000000000000000000');
const tree = await builder.serialize();
let callCount = 0;
@@ -182,20 +240,20 @@ describe('BitmapIndexReader', () => {
throw new Error('transient failure');
}
// Return real data for subsequent calls
- if (oid === 'meta-oid') return tree['meta_aa.json'] || tree['meta_ee.json'];
- if (oid === 'rev-oid') return tree['shards_rev_ee.json'];
+ if (oid === 'aaa1bbb200000000000000000000000000000000') return tree['meta_aa.json'] || tree['meta_ee.json'];
+ if (oid === 'bbb2ccc300000000000000000000000000000000') return tree['shards_rev_ee.json'];
return Buffer.from('{}');
});
reader.setup({
- 'meta_aa.json': 'meta-oid',
- 'meta_ee.json': 'meta-oid',
- 'shards_rev_ee.json': 'rev-oid',
- 'shards_rev_aa.json': 'corrupt-oid' // This one fails
+ 'meta_aa.json': 'aaa1bbb200000000000000000000000000000000',
+ 'meta_ee.json': 'aaa1bbb200000000000000000000000000000000',
+ 'shards_rev_ee.json': 'bbb2ccc300000000000000000000000000000000',
+ 'shards_rev_aa.json': 'eee5fff600000000000000000000000000000000' // This one fails
});
// First query hits storage error - should throw ShardLoadError
- await expect(reader.getParents('aabbccdd')).rejects.toThrow(ShardLoadError);
+ await expect(reader.getParents('aabbccdd00000000000000000000000000000000')).rejects.toThrow(ShardLoadError);
// Reader should still be functional for other queries
// (the reader wasn't corrupted by the error)
@@ -211,10 +269,10 @@ describe('BitmapIndexReader', () => {
})));
strictReader.setup({
- 'shards_rev_ab.json': 'bad-version-oid'
+ 'shards_rev_ab.json': 'aab1ccdd00000000000000000000000000000000'
});
- await expect(strictReader.getParents('abcd1234')).rejects.toThrow(ShardValidationError);
+ await expect(strictReader.getParents('abcd123400000000000000000000000000000000')).rejects.toThrow(ShardValidationError);
});
it('in strict mode throws ShardCorruptionError on invalid format', async () => {
@@ -222,10 +280,10 @@ describe('BitmapIndexReader', () => {
mockStorage.readBlob.mockResolvedValue(Buffer.from('not valid json {{{'));
strictReader.setup({
- 'shards_rev_ab.json': 'corrupt-oid'
+ 'shards_rev_ab.json': 'eee5fff600000000000000000000000000000000'
});
- await expect(strictReader.getParents('abcd1234')).rejects.toThrow(ShardCorruptionError);
+ await expect(strictReader.getParents('abcd123400000000000000000000000000000000')).rejects.toThrow(ShardCorruptionError);
});
it('in strict mode throws ShardValidationError on checksum mismatch', async () => {
@@ -237,10 +295,10 @@ describe('BitmapIndexReader', () => {
})));
strictReader.setup({
- 'meta_ab.json': 'bad-checksum-oid'
+ 'meta_ab.json': 'bbccdd1100000000000000000000000000000000'
});
- await expect(strictReader.lookupId('abcd1234')).rejects.toThrow(ShardValidationError);
+ await expect(strictReader.lookupId('abcd123400000000000000000000000000000000')).rejects.toThrow(ShardValidationError);
});
it('error objects contain useful context for debugging', async () => {
@@ -252,11 +310,11 @@ describe('BitmapIndexReader', () => {
})));
strictReader.setup({
- 'shards_fwd_cd.json': 'context-test-oid'
+ 'shards_fwd_cd.json': 'ccddee2200000000000000000000000000000000'
});
try {
- await strictReader.getChildren('cdcd1234');
+ await strictReader.getChildren('cdcd123400000000000000000000000000000000');
expect.fail('Should have thrown');
} catch (/** @type {any} */ err) {
expect(err).toBeInstanceOf(ShardValidationError);
@@ -273,17 +331,17 @@ describe('BitmapIndexReader', () => {
mockStorage.readBlob.mockRejectedValue(originalError);
reader.setup({
- 'meta_ef.json': 'network-fail-oid'
+ 'meta_ef.json': 'ddeeff3300000000000000000000000000000000'
});
try {
- await reader.lookupId('efgh5678');
+ await reader.lookupId('ef00567800000000000000000000000000000000');
expect.fail('Should have thrown');
} catch (/** @type {any} */ err) {
expect(err).toBeInstanceOf(ShardLoadError);
expect(err.code).toBe('SHARD_LOAD_ERROR');
expect(err.shardPath).toBe('meta_ef.json');
- expect(err.oid).toBe('network-fail-oid');
+ expect(err.oid).toBe('ddeeff3300000000000000000000000000000000');
expect(err.cause).toBe(originalError);
}
});
@@ -291,19 +349,19 @@ describe('BitmapIndexReader', () => {
it('non-strict mode returns empty but strict mode throws for same corruption', async () => {
const corruptData = Buffer.from('{"not": "a valid shard format"}');
- // Non-strict reader (default)
+ // Non-strict reader (explicit override)
const nonStrictReader = new BitmapIndexReader(/** @type {any} */ ({ storage: mockStorage, strict: false }));
mockStorage.readBlob.mockResolvedValue(corruptData);
- nonStrictReader.setup({ 'shards_rev_ab.json': 'corrupt-oid' });
+ nonStrictReader.setup({ 'shards_rev_ab.json': 'eee5fff600000000000000000000000000000000' });
- const nonStrictResult = await nonStrictReader.getParents('abcd1234');
+ const nonStrictResult = await nonStrictReader.getParents('abcd123400000000000000000000000000000000');
expect(nonStrictResult).toEqual([]); // Graceful degradation
// Strict reader
const strictReader = new BitmapIndexReader(/** @type {any} */ ({ storage: mockStorage, strict: true }));
- strictReader.setup({ 'shards_rev_ab.json': 'corrupt-oid' });
+ strictReader.setup({ 'shards_rev_ab.json': 'eee5fff600000000000000000000000000000000' });
- await expect(strictReader.getParents('abcd1234')).rejects.toThrow(ShardCorruptionError);
+ await expect(strictReader.getParents('abcd123400000000000000000000000000000000')).rejects.toThrow(ShardCorruptionError);
});
it('caches empty shard on validation failure to avoid repeated I/O and log spam', async () => {
@@ -326,10 +384,10 @@ describe('BitmapIndexReader', () => {
data: {}
})));
- nonStrictReader.setup({ 'shards_rev_ab.json': 'bad-version-oid' });
+ nonStrictReader.setup({ 'shards_rev_ab.json': 'aab1ccdd00000000000000000000000000000000' });
// First access - should log warning
- const result1 = await nonStrictReader.getParents('abcd1234');
+ const result1 = await nonStrictReader.getParents('abcd123400000000000000000000000000000000');
expect(result1).toEqual([]);
expect(mockLogger.warn).toHaveBeenCalledTimes(1);
expect(mockLogger.warn).toHaveBeenCalledWith('Shard validation warning', expect.objectContaining({
@@ -337,7 +395,7 @@ describe('BitmapIndexReader', () => {
}));
// Second access to same shard - should NOT log again (cached)
- const result2 = await nonStrictReader.getParents('abcd1234');
+ const result2 = await nonStrictReader.getParents('abcd123400000000000000000000000000000000');
expect(result2).toEqual([]);
expect(mockLogger.warn).toHaveBeenCalledTimes(1); // Still only 1 call
@@ -361,10 +419,10 @@ describe('BitmapIndexReader', () => {
// Return invalid JSON (parse error)
mockStorage.readBlob.mockResolvedValue(Buffer.from('not valid json {{{'));
- nonStrictReader.setup({ 'shards_rev_ab.json': 'corrupt-oid' });
+ nonStrictReader.setup({ 'shards_rev_ab.json': 'eee5fff600000000000000000000000000000000' });
// First access - should log warning
- const result1 = await nonStrictReader.getParents('abcd1234');
+ const result1 = await nonStrictReader.getParents('abcd123400000000000000000000000000000000');
expect(result1).toEqual([]);
expect(mockLogger.warn).toHaveBeenCalledTimes(1);
expect(mockLogger.warn).toHaveBeenCalledWith('Shard validation warning', expect.objectContaining({
@@ -373,7 +431,7 @@ describe('BitmapIndexReader', () => {
}));
// Second access to same shard - should NOT log again (cached)
- const result2 = await nonStrictReader.getParents('abcd1234');
+ const result2 = await nonStrictReader.getParents('abcd123400000000000000000000000000000000');
expect(result2).toEqual([]);
expect(mockLogger.warn).toHaveBeenCalledTimes(1); // Still only 1 call
@@ -384,30 +442,30 @@ describe('BitmapIndexReader', () => {
describe('shard versioning', () => {
it('accepts v1 shards for backward compatibility', async () => {
- const v1Data = { 'abcd1234': 42 };
+ const v1Data = { 'abcd123400000000000000000000000000000000': 42 };
const v1Shard = createV1Shard(v1Data);
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify(v1Shard)));
reader.setup({
- 'meta_ab.json': 'v1-shard-oid'
+ 'meta_ab.json': 'eeff004400000000000000000000000000000000'
});
- const id = await reader.lookupId('abcd1234');
+ const id = await reader.lookupId('abcd123400000000000000000000000000000000');
expect(id).toBe(42);
});
it('accepts v2 shards', async () => {
- const v2Data = { 'abcd1234': 99 };
+ const v2Data = { 'abcd123400000000000000000000000000000000': 99 };
const v2Shard = createV2Shard(v2Data);
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify(v2Shard)));
reader.setup({
- 'meta_ab.json': 'v2-shard-oid'
+ 'meta_ab.json': 'ff00115500000000000000000000000000000000'
});
- const id = await reader.lookupId('abcd1234');
+ const id = await reader.lookupId('abcd123400000000000000000000000000000000');
expect(id).toBe(99);
});
@@ -418,20 +476,20 @@ describe('BitmapIndexReader', () => {
const v2ShardWithBadChecksum = {
version: 2,
checksum: 'intentionally-wrong-checksum-value',
- data: { 'abcd1234': 123 }
+ data: { 'abcd123400000000000000000000000000000000': 123 }
};
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify(v2ShardWithBadChecksum)));
strictReader.setup({
- 'meta_ab.json': 'bad-checksum-v2-oid'
+ 'meta_ab.json': '0011226600000000000000000000000000000000'
});
- await expect(strictReader.lookupId('abcd1234')).rejects.toThrow(ShardValidationError);
+ await expect(strictReader.lookupId('abcd123400000000000000000000000000000000')).rejects.toThrow(ShardValidationError);
// Verify the error contains the expected context
try {
- await strictReader.lookupId('abcd1234');
+ await strictReader.lookupId('abcd123400000000000000000000000000000000');
} catch (/** @type {any} */ err) {
expect(err.field).toBe('checksum');
expect(err.shardPath).toBe('meta_ab.json');
@@ -456,17 +514,17 @@ describe('BitmapIndexReader', () => {
const v2ShardWithBadChecksum = {
version: 2,
checksum: 'intentionally-wrong-checksum-value',
- data: { 'abcd1234': 123 }
+ data: { 'abcd123400000000000000000000000000000000': 123 }
};
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify(v2ShardWithBadChecksum)));
nonStrictReader.setup({
- 'meta_ab.json': 'bad-checksum-v2-oid'
+ 'meta_ab.json': '0011226600000000000000000000000000000000'
});
// Should not throw, but return undefined (empty shard cached)
- const result = await nonStrictReader.lookupId('abcd1234');
+ const result = await nonStrictReader.lookupId('abcd123400000000000000000000000000000000');
expect(result).toBeUndefined();
// Should have logged a warning
@@ -500,7 +558,7 @@ describe('BitmapIndexReader', () => {
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify(v1Shard)));
reader.setup({
- 'meta_ze.json': 'v1-key-order-oid'
+ 'meta_ze.json': '1122337700000000000000000000000000000000'
});
// Should succeed because v1 uses JSON.stringify for verification
@@ -526,7 +584,7 @@ describe('BitmapIndexReader', () => {
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify(v2Shard)));
reader.setup({
- 'meta_ze.json': 'v2-canonical-oid'
+ 'meta_ze.json': '2233448800000000000000000000000000000000'
});
// Should succeed because v2 uses canonicalStringify for verification
@@ -555,27 +613,27 @@ describe('BitmapIndexReader', () => {
});
smallCacheReader.setup({
- 'meta_aa.json': 'oid-aa',
- 'meta_bb.json': 'oid-bb',
- 'meta_cc.json': 'oid-cc',
+ 'meta_aa.json': 'aa00112200000000000000000000000000000000',
+ 'meta_bb.json': 'bb33445500000000000000000000000000000000',
+ 'meta_cc.json': 'cc66778800000000000000000000000000000000',
});
// Load first shard
- await smallCacheReader.lookupId('aabbccdd');
+ await smallCacheReader.lookupId('aabbccdd00000000000000000000000000000000');
expect(smallCacheReader.loadedShards.size).toBe(1);
expect(mockStorage.readBlob).toHaveBeenCalledTimes(1);
// Load second shard
- await smallCacheReader.lookupId('bbccddee');
+ await smallCacheReader.lookupId('bbccddee00000000000000000000000000000000');
expect(smallCacheReader.loadedShards.size).toBe(2);
expect(mockStorage.readBlob).toHaveBeenCalledTimes(2);
// Load third shard - should evict first
- await smallCacheReader.lookupId('ccddeeff');
+ await smallCacheReader.lookupId('ccddeeff00000000000000000000000000000000');
expect(smallCacheReader.loadedShards.size).toBe(2); // Still 2 due to LRU eviction
// First shard should be evicted, accessing it again should reload
- await smallCacheReader.lookupId('aabbccdd');
+ await smallCacheReader.lookupId('aabbccdd00000000000000000000000000000000');
expect(mockStorage.readBlob).toHaveBeenCalledTimes(4); // 3 + 1 reload
});
@@ -596,20 +654,20 @@ describe('BitmapIndexReader', () => {
});
smallCacheReader.setup({
- 'meta_aa.json': 'oid-aa',
- 'meta_bb.json': 'oid-bb',
- 'meta_cc.json': 'oid-cc',
+ 'meta_aa.json': 'aa00112200000000000000000000000000000000',
+ 'meta_bb.json': 'bb33445500000000000000000000000000000000',
+ 'meta_cc.json': 'cc66778800000000000000000000000000000000',
});
// Load first two shards
- await smallCacheReader.lookupId('aabbccdd'); // Load aa
- await smallCacheReader.lookupId('bbccddee'); // Load bb
+ await smallCacheReader.lookupId('aabbccdd00000000000000000000000000000000'); // Load aa
+ await smallCacheReader.lookupId('bbccddee00000000000000000000000000000000'); // Load bb
// Access 'aa' again to make it recently used
- await smallCacheReader.lookupId('aabbccdd');
+ await smallCacheReader.lookupId('aabbccdd00000000000000000000000000000000');
// Load third shard - should evict 'bb' (now oldest)
- await smallCacheReader.lookupId('ccddeeff'); // Load cc
+ await smallCacheReader.lookupId('ccddeeff00000000000000000000000000000000'); // Load cc
// 'aa' should still be in cache (was recently used)
expect(smallCacheReader.loadedShards.has('meta_aa.json')).toBe(true);
diff --git a/test/unit/domain/services/CheckpointService.test.js b/test/unit/domain/services/CheckpointService.test.js
index 7f2fd564..20b240b9 100644
--- a/test/unit/domain/services/CheckpointService.test.js
+++ b/test/unit/domain/services/CheckpointService.test.js
@@ -24,7 +24,6 @@ const makeOid = (/** @type {string} */ prefix) => {
};
describe('CheckpointService', () => {
- /** @type {any} */
/** @type {any} */
let mockPersistence;
@@ -543,8 +542,6 @@ describe('CheckpointService', () => {
});
describe('V5 checkpoint with full ORSet state', () => {
- /** @type {any} */
- /** @type {any} */
/** @type {any} */
let mockPersistence;
@@ -621,7 +618,6 @@ describe('CheckpointService', () => {
const frontier = createFrontier();
updateFrontier(frontier, 'alice', makeOid('sha1'));
- /** @type {any} */
/** @type {any} */
let capturedStateBuffer;
mockPersistence.writeBlob.mockImplementation((/** @type {any} */ buffer) => {
@@ -658,7 +654,6 @@ describe('CheckpointService', () => {
const frontier = createFrontier();
updateFrontier(frontier, 'alice', makeOid('sha1'));
- /** @type {any} */
/** @type {any} */
let capturedStateBuffer;
mockPersistence.writeBlob.mockImplementation((/** @type {any} */ buffer) => {
@@ -944,7 +939,6 @@ describe('CheckpointService', () => {
// Create checkpoint with compaction
/** @type {any} */
- /** @type {any} */
let writtenVisibleBlob;
let blobIndex = 0;
mockPersistence.writeBlob.mockImplementation((/** @type {any} */ buffer) => {
diff --git a/test/unit/domain/services/CommitDagTraversalService.test.js b/test/unit/domain/services/CommitDagTraversalService.test.js
index 9d0000a3..6b93c300 100644
--- a/test/unit/domain/services/CommitDagTraversalService.test.js
+++ b/test/unit/domain/services/CommitDagTraversalService.test.js
@@ -17,7 +17,6 @@ import TraversalError from '../../../../src/domain/errors/TraversalError.js';
* Reverse edges: B->A, C->A, D->B, D->C, E->D
*/
function createMockIndexReader() {
- /** @type {Record} */
/** @type {Record} */
const forwardEdges = {
A: ['B', 'C'],
@@ -27,7 +26,6 @@ function createMockIndexReader() {
E: [],
};
- /** @type {Record} */
/** @type {Record} */
const reverseEdges = {
A: [],
@@ -303,13 +301,11 @@ describe('CommitDagTraversalService', () => {
// Create a cycle: A -> B -> C -> A
const cyclicReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['B'], B: ['C'], C: ['A'] };
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { B: ['A'], C: ['B'], A: ['C'] };
return edges[sha] || [];
@@ -327,7 +323,6 @@ describe('CommitDagTraversalService', () => {
it('logs warning when cycle is detected', async () => {
const cyclicReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['B'], B: ['C'], C: ['A'] };
return edges[sha] || [];
@@ -361,7 +356,6 @@ describe('CommitDagTraversalService', () => {
it('throws TraversalError when throwOnCycle is true and cycle detected', async () => {
const cyclicReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['B'], B: ['C'], C: ['A'] };
return edges[sha] || [];
@@ -398,13 +392,11 @@ describe('CommitDagTraversalService', () => {
// Create a self-loop: A -> A (node A points to itself)
const selfLoopReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['A'] }; // A is its own child
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['A'] }; // A is its own parent
return edges[sha] || [];
@@ -434,7 +426,6 @@ describe('CommitDagTraversalService', () => {
// Create a self-loop: A -> A
const selfLoopReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['A'] };
return edges[sha] || [];
@@ -457,7 +448,6 @@ describe('CommitDagTraversalService', () => {
it('logs warning for self-loop cycle', async () => {
const selfLoopReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = { A: ['A'] };
return edges[sha] || [];
@@ -493,7 +483,6 @@ describe('CommitDagTraversalService', () => {
// Island 2: X -> Y -> Z (disconnected from Island 1)
const disconnectedReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B'],
@@ -506,7 +495,6 @@ describe('CommitDagTraversalService', () => {
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: [],
@@ -570,7 +558,6 @@ describe('CommitDagTraversalService', () => {
// Cheapest: A->C->D (cost 2) vs A->B->D (cost 11)
const weightedReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C'],
@@ -581,7 +568,6 @@ describe('CommitDagTraversalService', () => {
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: [],
@@ -668,7 +654,6 @@ describe('CommitDagTraversalService', () => {
// Create a graph with disconnected components
const disconnectedReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B'],
@@ -679,7 +664,6 @@ describe('CommitDagTraversalService', () => {
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: [],
@@ -732,7 +716,6 @@ describe('CommitDagTraversalService', () => {
// Shortest path A->E: A->B->E (cost 2) or A->D->E (cost 3) or A->C->E (cost 6)
const complexReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C', 'D'],
@@ -772,7 +755,6 @@ describe('CommitDagTraversalService', () => {
it('handles zero-weight edges', async () => {
const zeroWeightReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C'],
@@ -871,7 +853,6 @@ describe('CommitDagTraversalService', () => {
// With a good heuristic, A* should explore fewer nodes
const gridReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'D'],
@@ -925,7 +906,6 @@ describe('CommitDagTraversalService', () => {
// With heuristic pointing toward C->G->J path, fewer nodes explored.
const wideReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C', 'D'],
@@ -1044,7 +1024,6 @@ describe('CommitDagTraversalService', () => {
// Create a weighted graph
const weightedReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C'],
@@ -1108,7 +1087,6 @@ describe('CommitDagTraversalService', () => {
// because it has made more "actual progress" (g=2 > g=1)
const tieBreakReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
START: ['A', 'B'],
@@ -1226,7 +1204,6 @@ describe('CommitDagTraversalService', () => {
// Create a graph with weighted edges where path choice matters
const weightedReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C'],
@@ -1238,7 +1215,6 @@ describe('CommitDagTraversalService', () => {
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: [],
@@ -1374,7 +1350,6 @@ describe('CommitDagTraversalService', () => {
// Create a graph with different edge weights
const weightedReader = {
getChildren: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: ['B', 'C'],
@@ -1385,7 +1360,6 @@ describe('CommitDagTraversalService', () => {
return edges[sha] || [];
}),
getParents: vi.fn(async (/** @type {string} */ sha) => {
- /** @type {Record} */
/** @type {Record} */
const edges = {
A: [],
diff --git a/test/unit/domain/services/GitLogParser.test.js b/test/unit/domain/services/GitLogParser.test.js
index 9f3bd491..45774c36 100644
--- a/test/unit/domain/services/GitLogParser.test.js
+++ b/test/unit/domain/services/GitLogParser.test.js
@@ -3,7 +3,6 @@ import GitLogParser, { RECORD_SEPARATOR } from '../../../../src/domain/services/
import GraphNode from '../../../../src/domain/entities/GraphNode.js';
describe('GitLogParser', () => {
- /** @type {any} */
/** @type {any} */
let parser;
diff --git a/test/unit/domain/services/HealthCheckService.test.js b/test/unit/domain/services/HealthCheckService.test.js
index 13895479..9a5555dd 100644
--- a/test/unit/domain/services/HealthCheckService.test.js
+++ b/test/unit/domain/services/HealthCheckService.test.js
@@ -2,23 +2,17 @@ import { describe, it, expect, vi, beforeEach } from 'vitest';
import HealthCheckService, { HealthStatus } from '../../../../src/domain/services/HealthCheckService.js';
describe('HealthCheckService', () => {
- /** @type {any} */
/** @type {any} */
let service;
/** @type {any} */
- /** @type {any} */
let mockPersistence;
/** @type {any} */
- /** @type {any} */
let mockClock;
/** @type {any} */
- /** @type {any} */
let mockIndexReader;
/** @type {any} */
- /** @type {any} */
let mockLogger;
/** @type {any} */
- /** @type {any} */
let currentTime;
beforeEach(() => {
diff --git a/test/unit/domain/services/HttpSyncServer.test.js b/test/unit/domain/services/HttpSyncServer.test.js
index bbed6f90..59f97969 100644
--- a/test/unit/domain/services/HttpSyncServer.test.js
+++ b/test/unit/domain/services/HttpSyncServer.test.js
@@ -28,7 +28,6 @@ function canonicalStringify(value) {
*/
/** @returns {any} */
function createMockPort() {
- /** @type {any} */
/** @type {any} */
let handler;
let listenCallback;
@@ -67,11 +66,9 @@ function createMockPort() {
}
describe('HttpSyncServer', () => {
- /** @type {any} */
/** @type {any} */
let mockPort;
/** @type {any} */
- /** @type {any} */
let graph;
beforeEach(() => {
@@ -116,7 +113,6 @@ describe('HttpSyncServer', () => {
});
describe('request handling', () => {
- /** @type {any} */
/** @type {any} */
let handler;
diff --git a/test/unit/domain/services/IndexRebuildService.streaming.test.js b/test/unit/domain/services/IndexRebuildService.streaming.test.js
index 46919fd5..0e7860af 100644
--- a/test/unit/domain/services/IndexRebuildService.streaming.test.js
+++ b/test/unit/domain/services/IndexRebuildService.streaming.test.js
@@ -3,17 +3,13 @@ import IndexRebuildService from '../../../../src/domain/services/IndexRebuildSer
import GraphNode from '../../../../src/domain/entities/GraphNode.js';
describe('IndexRebuildService streaming mode', () => {
- /** @type {any} */
/** @type {any} */
let service;
/** @type {any} */
- /** @type {any} */
let mockStorage;
/** @type {any} */
- /** @type {any} */
let mockGraphService;
/** @type {any} */
- /** @type {any} */
let writtenBlobs;
beforeEach(() => {
@@ -22,7 +18,7 @@ describe('IndexRebuildService streaming mode', () => {
mockStorage = {
writeBlob: vi.fn().mockImplementation(async (buffer) => {
- const oid = `blob-${blobCounter++}`;
+ const oid = `b10b${String(blobCounter++).padStart(36, '0')}`;
writtenBlobs.set(oid, buffer);
return oid;
}),
diff --git a/test/unit/domain/services/IndexRebuildService.test.js b/test/unit/domain/services/IndexRebuildService.test.js
index bfda9e1d..1128a4c6 100644
--- a/test/unit/domain/services/IndexRebuildService.test.js
+++ b/test/unit/domain/services/IndexRebuildService.test.js
@@ -6,14 +6,11 @@ import NodeCryptoAdapter from '../../../../src/infrastructure/adapters/NodeCrypt
const crypto = new NodeCryptoAdapter();
describe('IndexRebuildService', () => {
- /** @type {any} */
/** @type {any} */
let service;
/** @type {any} */
- /** @type {any} */
let mockStorage;
/** @type {any} */
- /** @type {any} */
let mockGraphService;
beforeEach(() => {
@@ -131,7 +128,7 @@ describe('IndexRebuildService', () => {
it('strict mode throws ShardValidationError on checksum mismatch', async () => {
// Mock storage to return shard with wrong checksum
mockStorage.readTreeOids.mockResolvedValue({
- 'meta_ab.json': 'bad-checksum-oid'
+ 'meta_ab.json': 'badcbadcbadcbadcbadcbadcbadcbadcbadcbadc'
});
mockStorage.readBlob.mockResolvedValue(Buffer.from(JSON.stringify({
version: 1,
@@ -149,7 +146,7 @@ describe('IndexRebuildService', () => {
it('strict mode throws ShardCorruptionError on invalid format', async () => {
mockStorage.readTreeOids.mockResolvedValue({
- 'meta_ab.json': 'corrupt-oid'
+ 'meta_ab.json': 'c0aac0aac0aac0aac0aac0aac0aac0aac0aac0aa'
});
mockStorage.readBlob.mockResolvedValue(Buffer.from('not valid json'));
@@ -162,7 +159,7 @@ describe('IndexRebuildService', () => {
it('non-strict mode returns empty on integrity failure instead of throwing', async () => {
mockStorage.readTreeOids.mockResolvedValue({
- 'meta_ab.json': 'bad-oid'
+ 'meta_ab.json': 'bad0bad0bad0bad0bad0bad0bad0bad0bad0bad0'
});
mockStorage.readBlob.mockResolvedValue(Buffer.from('invalid'));
diff --git a/test/unit/domain/services/IndexStalenessChecker.test.js b/test/unit/domain/services/IndexStalenessChecker.test.js
index 54420631..41fafd14 100644
--- a/test/unit/domain/services/IndexStalenessChecker.test.js
+++ b/test/unit/domain/services/IndexStalenessChecker.test.js
@@ -108,14 +108,11 @@ describe('checkStaleness', () => {
});
describe('IndexRebuildService.load() staleness integration', () => {
- /** @type {any} */
/** @type {any} */
let storage;
/** @type {any} */
- /** @type {any} */
let logger;
/** @type {any} */
- /** @type {any} */
let graphService;
beforeEach(() => {
@@ -142,8 +139,8 @@ describe('IndexRebuildService.load() staleness integration', () => {
const cborBuffer = Buffer.from(cborEncode(envelope));
storage.readTreeOids.mockResolvedValue({
- 'meta_aa.json': 'meta-oid',
- 'frontier.cbor': 'frontier-oid',
+ 'meta_aa.json': 'aaa1aaa2aaa3aaa4aaa5aaa6aaa7aaa8aaa9aaa0',
+ 'frontier.cbor': 'bbb1bbb2bbb3bbb4bbb5bbb6bbb7bbb8bbb9bbb0',
});
storage.readBlob.mockResolvedValue(cborBuffer);
@@ -163,8 +160,8 @@ describe('IndexRebuildService.load() staleness integration', () => {
const cborBuffer = Buffer.from(cborEncode(envelope));
storage.readTreeOids.mockResolvedValue({
- 'meta_aa.json': 'meta-oid',
- 'frontier.cbor': 'frontier-oid',
+ 'meta_aa.json': 'aaa1aaa2aaa3aaa4aaa5aaa6aaa7aaa8aaa9aaa0',
+ 'frontier.cbor': 'bbb1bbb2bbb3bbb4bbb5bbb6bbb7bbb8bbb9bbb0',
});
storage.readBlob.mockResolvedValue(cborBuffer);
@@ -178,7 +175,7 @@ describe('IndexRebuildService.load() staleness integration', () => {
it('no frontier (legacy) → debug log, no warning', async () => {
storage.readTreeOids.mockResolvedValue({
- 'meta_aa.json': 'meta-oid',
+ 'meta_aa.json': 'aaa1aaa2aaa3aaa4aaa5aaa6aaa7aaa8aaa9aaa0',
});
const service = new IndexRebuildService(/** @type {any} */ ({ graphService, storage, logger }));
@@ -199,15 +196,15 @@ describe('IndexRebuildService.load() staleness integration', () => {
// First call: stale index
storage.readTreeOids.mockResolvedValueOnce({
- 'meta_aa.json': 'meta-oid',
- 'frontier.cbor': 'frontier-oid',
+ 'meta_aa.json': 'aaa1aaa2aaa3aaa4aaa5aaa6aaa7aaa8aaa9aaa0',
+ 'frontier.cbor': 'bbb1bbb2bbb3bbb4bbb5bbb6bbb7bbb8bbb9bbb0',
});
storage.readBlob.mockResolvedValueOnce(cborBuffer);
// rebuild() returns new tree OID
// Second call: rebuilt index (no frontier = no staleness check)
storage.readTreeOids.mockResolvedValueOnce({
- 'meta_aa.json': 'new-meta-oid',
+ 'meta_aa.json': 'ccc1ccc2ccc3ccc4ccc5ccc6ccc7ccc8ccc9ccc0',
});
const currentFrontier = new Map([['alice', 'sha-new']]);
diff --git a/test/unit/domain/services/JoinReducer.test.js b/test/unit/domain/services/JoinReducer.test.js
index 6a98f4c3..6cd88cbd 100644
--- a/test/unit/domain/services/JoinReducer.test.js
+++ b/test/unit/domain/services/JoinReducer.test.js
@@ -7,6 +7,8 @@ import {
decodePropKey,
applyOpV2,
join,
+ applyFast,
+ applyWithReceipt,
joinStates,
reduceV5 as _reduceV5,
cloneStateV5,
@@ -739,4 +741,103 @@ describe('JoinReducer', () => {
expect(receipt).toBeDefined();
});
});
+
+ describe('applyFast / applyWithReceipt', () => {
+ it('applyFast applies ops and updates frontier', () => {
+ const state = createEmptyStateV5();
+ const dot = createDot('w1', 1);
+ const patch = createPatchV2({
+ writer: 'w1',
+ lamport: 1,
+ ops: [createNodeAddV2('n1', dot)],
+ context: createVersionVector(),
+ });
+ const result = applyFast(state, patch, 'fa51aa00ee11');
+ expect(result).toBe(state); // mutates in place
+ expect(orsetContains(state.nodeAlive, 'n1')).toBe(true);
+ expect(state.observedFrontier.get('w1')).toBe(1);
+ });
+
+ it('applyWithReceipt returns state and receipt', () => {
+ const state = createEmptyStateV5();
+ const dot = createDot('w1', 1);
+ const patch = createPatchV2({
+ writer: 'w1',
+ lamport: 1,
+ ops: [createNodeAddV2('n1', dot)],
+ context: createVersionVector(),
+ });
+ const result = applyWithReceipt(state, patch, 'bece1111ee22');
+ expect(result.state).toBe(state);
+ expect(result.receipt).toBeDefined();
+ expect(result.receipt.patchSha).toBe('bece1111ee22');
+ expect(result.receipt.ops).toHaveLength(1);
+ expect(result.receipt.ops[0].op).toBe('NodeAdd');
+ expect(result.receipt.ops[0].result).toBe('applied');
+ expect(orsetContains(state.nodeAlive, 'n1')).toBe(true);
+ expect(state.observedFrontier.get('w1')).toBe(1);
+ });
+
+ it('applyFast handles undefined context gracefully', () => {
+ const state = createEmptyStateV5();
+ const dot = createDot('w1', 1);
+ const patch = {
+ schema: 2,
+ writer: 'w1',
+ lamport: 1,
+ ops: [createNodeAddV2('n1', dot)],
+ context: undefined,
+ };
+ const result = applyFast(state, /** @type {*} */ (patch), 'aa00000000000000');
+ expect(result).toBe(state);
+ expect(orsetContains(state.nodeAlive, 'n1')).toBe(true);
+ expect(state.observedFrontier.get('w1')).toBe(1);
+ });
+
+ it('applyFast handles null context gracefully', () => {
+ const state = createEmptyStateV5();
+ const dot = createDot('w1', 1);
+ const patch = {
+ schema: 2,
+ writer: 'w1',
+ lamport: 1,
+ ops: [createNodeAddV2('n1', dot)],
+ context: null,
+ };
+ const result = applyFast(state, /** @type {*} */ (patch), 'bb00000000000000');
+ expect(result).toBe(state);
+ expect(orsetContains(state.nodeAlive, 'n1')).toBe(true);
+ expect(state.observedFrontier.get('w1')).toBe(1);
+ });
+
+ it('join dispatches to applyFast when collectReceipts is false', () => {
+ const state = createEmptyStateV5();
+ const dot = createDot('w1', 1);
+ const patch = createPatchV2({
+ writer: 'w1',
+ lamport: 1,
+ ops: [createNodeAddV2('n1', dot)],
+ context: createVersionVector(),
+ });
+ const result = join(state, patch, 'd15a07c0');
+ // applyFast returns state directly
+ expect(result).toBe(state);
+ expect(orsetContains(state.nodeAlive, 'n1')).toBe(true);
+ });
+
+ it('join dispatches to applyWithReceipt when collectReceipts is true', () => {
+ const state = createEmptyStateV5();
+ const dot = createDot('w1', 1);
+ const patch = createPatchV2({
+ writer: 'w1',
+ lamport: 1,
+ ops: [createNodeAddV2('n1', dot)],
+ context: createVersionVector(),
+ });
+ const result = /** @type {{state: *, receipt: *}} */ (join(state, patch, 'd15a07c1', true));
+ expect(result.state).toBe(state);
+ expect(result.receipt).toBeDefined();
+ expect(result.receipt.patchSha).toBe('d15a07c1');
+ });
+ });
});
diff --git a/test/unit/domain/services/ObserverView.test.js b/test/unit/domain/services/ObserverView.test.js
index 0eb80f6e..6c6486f5 100644
--- a/test/unit/domain/services/ObserverView.test.js
+++ b/test/unit/domain/services/ObserverView.test.js
@@ -30,11 +30,9 @@ function addProp(state, nodeId, key, value) {
}
describe('ObserverView', () => {
- /** @type {any} */
/** @type {any} */
let mockPersistence;
/** @type {any} */
- /** @type {any} */
let graph;
beforeEach(async () => {
diff --git a/test/unit/domain/services/StreamingBitmapIndexBuilder.test.js b/test/unit/domain/services/StreamingBitmapIndexBuilder.test.js
index 82dbbc56..6238d6d8 100644
--- a/test/unit/domain/services/StreamingBitmapIndexBuilder.test.js
+++ b/test/unit/domain/services/StreamingBitmapIndexBuilder.test.js
@@ -19,11 +19,9 @@ function createMockEnvelope(data) {
}
describe('StreamingBitmapIndexBuilder', () => {
- /** @type {any} */
/** @type {any} */
let mockStorage;
/** @type {any} */
- /** @type {any} */
let writtenBlobs;
beforeEach(() => {
diff --git a/test/unit/domain/services/SyncController.test.js b/test/unit/domain/services/SyncController.test.js
new file mode 100644
index 00000000..979d75d0
--- /dev/null
+++ b/test/unit/domain/services/SyncController.test.js
@@ -0,0 +1,790 @@
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import SyncController from '../../../../src/domain/services/SyncController.js';
+import SyncError from '../../../../src/domain/errors/SyncError.js';
+import OperationAbortedError from '../../../../src/domain/errors/OperationAbortedError.js';
+
+const { timeoutMock, retryMock, httpSyncServerMock } = vi.hoisted(() => {
+ const timeoutMock = vi.fn(async (/** @type {number} */ _ms, /** @type {Function} */ fn) => {
+ const ac = new AbortController();
+ return await fn(ac.signal);
+ });
+ const retryMock = vi.fn(async (/** @type {Function} */ fn) => await fn());
+ const httpSyncServerMock = vi.fn().mockImplementation(() => ({
+ listen: vi.fn().mockResolvedValue({ close: vi.fn(), url: 'http://127.0.0.1:3000/sync' }),
+ }));
+ return { timeoutMock, retryMock, httpSyncServerMock };
+});
+
+vi.mock('../../../../src/domain/services/SyncProtocol.js', async (importOriginal) => {
+ const original = /** @type {Record} */ (await importOriginal());
+ return {
+ ...original,
+ applySyncResponse: vi.fn(),
+ syncNeeded: vi.fn(),
+ processSyncRequest: vi.fn(),
+ };
+});
+
+vi.mock('@git-stunts/alfred', async (importOriginal) => {
+ const original = /** @type {Record} */ (await importOriginal());
+ return {
+ ...original,
+ timeout: timeoutMock,
+ retry: retryMock,
+ };
+});
+
+vi.mock('../../../../src/domain/services/HttpSyncServer.js', () => ({
+ default: httpSyncServerMock,
+}));
+
+// Import after mock setup so we get the mocked versions
+const { applySyncResponse: applySyncResponseMock, syncNeeded: syncNeededMock, processSyncRequest: processSyncRequestMock } =
+ /** @type {Record} */ (/** @type {unknown} */ (await import('../../../../src/domain/services/SyncProtocol.js')));
+
+/**
+ * Creates a mock WarpGraph host for SyncController tests.
+ *
+ * @param {Record} [overrides]
+ * @returns {Record}
+ */
+function createMockHost(overrides = {}) {
+ return {
+ _cachedState: null,
+ _lastFrontier: null,
+ _stateDirty: false,
+ _patchesSinceGC: 0,
+ _graphName: 'test-graph',
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue(null),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ _clock: { now: () => 0 },
+ _codec: {},
+ _crypto: {},
+ _logger: null,
+ _patchesSinceCheckpoint: 0,
+ _logTiming: vi.fn(),
+ materialize: vi.fn(),
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ ...overrides,
+ };
+}
+
+describe('SyncController', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ });
+
+ describe('constructor', () => {
+ it('stores host reference', () => {
+ const host = createMockHost();
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ expect(ctrl._host).toBe(host);
+ });
+ });
+
+ describe('getFrontier', () => {
+ it('returns empty frontier when no writers exist', async () => {
+ const host = createMockHost();
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const frontier = await ctrl.getFrontier();
+
+ expect(frontier).toBeInstanceOf(Map);
+ expect(frontier.size).toBe(0);
+ expect(host.discoverWriters).toHaveBeenCalledOnce();
+ });
+
+ it('calls readRef for each discovered writer', async () => {
+ const host = createMockHost({
+ discoverWriters: vi.fn().mockResolvedValue(['alice', 'bob']),
+ _persistence: {
+ readRef: vi.fn()
+ .mockResolvedValueOnce('sha-alice')
+ .mockResolvedValueOnce('sha-bob'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const frontier = await ctrl.getFrontier();
+
+ expect(frontier.size).toBe(2);
+ expect(frontier.get('alice')).toBe('sha-alice');
+ expect(frontier.get('bob')).toBe('sha-bob');
+ expect(/** @type {*} */ (host._persistence).readRef).toHaveBeenCalledTimes(2);
+ });
+
+ it('skips writers with null tip SHA', async () => {
+ const host = createMockHost({
+ discoverWriters: vi.fn().mockResolvedValue(['alice', 'bob']),
+ _persistence: {
+ readRef: vi.fn()
+ .mockResolvedValueOnce('sha-alice')
+ .mockResolvedValueOnce(null),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const frontier = await ctrl.getFrontier();
+
+ expect(frontier.size).toBe(1);
+ expect(frontier.get('alice')).toBe('sha-alice');
+ });
+ });
+
+ describe('hasFrontierChanged', () => {
+ it('returns true when _lastFrontier is null', async () => {
+ const host = createMockHost({ _lastFrontier: null });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const changed = await ctrl.hasFrontierChanged();
+
+ expect(changed).toBe(true);
+ });
+
+ it('returns false when frontier matches _lastFrontier', async () => {
+ const lastFrontier = new Map([['alice', 'sha-a']]);
+ const host = createMockHost({
+ _lastFrontier: lastFrontier,
+ discoverWriters: vi.fn().mockResolvedValue(['alice']),
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue('sha-a'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const changed = await ctrl.hasFrontierChanged();
+
+ expect(changed).toBe(false);
+ });
+
+ it('returns true when frontier size differs', async () => {
+ const lastFrontier = new Map([['alice', 'sha-a']]);
+ const host = createMockHost({
+ _lastFrontier: lastFrontier,
+ discoverWriters: vi.fn().mockResolvedValue(['alice', 'bob']),
+ _persistence: {
+ readRef: vi.fn()
+ .mockResolvedValueOnce('sha-a')
+ .mockResolvedValueOnce('sha-b'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const changed = await ctrl.hasFrontierChanged();
+
+ expect(changed).toBe(true);
+ });
+
+ it('returns true when a writer tip SHA differs', async () => {
+ const lastFrontier = new Map([['alice', 'sha-old']]);
+ const host = createMockHost({
+ _lastFrontier: lastFrontier,
+ discoverWriters: vi.fn().mockResolvedValue(['alice']),
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue('sha-new'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const changed = await ctrl.hasFrontierChanged();
+
+ expect(changed).toBe(true);
+ });
+ });
+
+ describe('status', () => {
+ it('returns correct shape with no cached state', async () => {
+ const host = createMockHost({
+ _cachedState: null,
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const result = await ctrl.status();
+
+ expect(result).toEqual({
+ cachedState: 'none',
+ patchesSinceCheckpoint: 0,
+ tombstoneRatio: 0,
+ writers: 0,
+ frontier: {},
+ });
+ });
+
+ it('reports stale when _stateDirty is true', async () => {
+ const host = createMockHost({
+ _cachedState: {
+ observedFrontier: new Map(),
+ nodeAlive: { entries: new Map(), tombstones: new Map() },
+ edgeAlive: { entries: new Map(), tombstones: new Map() },
+ },
+ _stateDirty: true,
+ _lastFrontier: new Map(),
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const result = await ctrl.status();
+
+ expect(result.cachedState).toBe('stale');
+ });
+
+ it('reports fresh when frontier matches and not dirty', async () => {
+ const host = createMockHost({
+ _cachedState: {
+ observedFrontier: new Map(),
+ nodeAlive: { entries: new Map(), tombstones: new Map() },
+ edgeAlive: { entries: new Map(), tombstones: new Map() },
+ },
+ _stateDirty: false,
+ _lastFrontier: new Map(),
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const result = await ctrl.status();
+
+ expect(result.cachedState).toBe('fresh');
+ });
+ });
+
+ describe('applySyncResponse', () => {
+ it('throws QueryError when no cached state', () => {
+ const host = createMockHost({ _cachedState: null });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ expect(() => ctrl.applySyncResponse({ type: 'sync-response', frontier: {}, patches: [] }))
+ .toThrow(/No materialized state/);
+ });
+
+ it('updates host state from applySyncResponseImpl result', () => {
+ const fakeState = {
+ observedFrontier: new Map(),
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ const newState = { observedFrontier: new Map(), nodeAlive: { dots: new Map() }, edgeAlive: { dots: new Map() } };
+ const newFrontier = new Map([['alice', 'sha-2']]);
+ applySyncResponseMock.mockReturnValue({ state: newState, frontier: newFrontier, applied: 3 });
+
+ const host = createMockHost({
+ _cachedState: fakeState,
+ _lastFrontier: new Map([['alice', 'sha-1']]),
+ _patchesSinceGC: 2,
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+ /** @type {{type: 'sync-response', frontier: Record, patches: *[]}} */
+ const response = { type: 'sync-response', frontier: {}, patches: [] };
+
+ const result = ctrl.applySyncResponse(response);
+
+ expect(result.applied).toBe(3);
+ expect(host._cachedState).toBe(newState);
+ expect(host._lastFrontier).toBe(newFrontier);
+ expect(host._patchesSinceGC).toBe(5);
+ expect(host._stateDirty).toBe(false);
+ expect(applySyncResponseMock).toHaveBeenCalledWith(
+ response,
+ fakeState,
+ expect.any(Map),
+ );
+ });
+
+ it('uses empty frontier when _lastFrontier is null', () => {
+ const fakeState = {
+ observedFrontier: new Map(),
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ const newFrontier = new Map([['bob', 'sha-b']]);
+ applySyncResponseMock.mockReturnValue({ state: fakeState, frontier: newFrontier, applied: 1 });
+
+ const host = createMockHost({
+ _cachedState: fakeState,
+ _lastFrontier: null,
+ _patchesSinceGC: 0,
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ ctrl.applySyncResponse({ type: 'sync-response', frontier: {}, patches: [] });
+
+ // Should have passed an empty Map (from createFrontier()) as the frontier arg
+ const calledFrontier = applySyncResponseMock.mock.calls[0][2];
+ expect(calledFrontier).toBeInstanceOf(Map);
+ expect(calledFrontier.size).toBe(0);
+ expect(host._lastFrontier).toBe(newFrontier);
+ });
+
+ it('passes _lastFrontier (not observedFrontier) to applySyncResponseImpl', () => {
+ const observedFrontier = new Map([['alice', 99]]);
+ const lastFrontier = new Map([['alice', 'sha-tip-1']]);
+ const fakeState = {
+ observedFrontier,
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ const newFrontier = new Map([['alice', 'sha-tip-2']]);
+ applySyncResponseMock.mockReturnValue({ state: fakeState, frontier: newFrontier, applied: 1 });
+
+ const host = createMockHost({
+ _cachedState: fakeState,
+ _lastFrontier: lastFrontier,
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ ctrl.applySyncResponse({ type: 'sync-response', frontier: {}, patches: [] });
+
+ const calledFrontier = applySyncResponseMock.mock.calls[0][2];
+ // Must be the SHA frontier map, not the VersionVector
+ expect(calledFrontier).toBe(lastFrontier);
+ expect(calledFrontier.get('alice')).toBe('sha-tip-1');
+ });
+ });
+
+ describe('syncNeeded', () => {
+ it('delegates to SyncProtocol.syncNeeded with local and remote frontiers', async () => {
+ syncNeededMock.mockReturnValue(true);
+ const host = createMockHost({
+ discoverWriters: vi.fn().mockResolvedValue(['alice']),
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue('sha-alice'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+ const remoteFrontier = new Map([['bob', 'sha-bob']]);
+
+ const result = await ctrl.syncNeeded(remoteFrontier);
+
+ expect(result).toBe(true);
+ expect(syncNeededMock).toHaveBeenCalledWith(
+ expect.any(Map),
+ remoteFrontier,
+ );
+ // Verify local frontier was built correctly
+ const calledLocalFrontier = syncNeededMock.mock.calls[0][0];
+ expect(calledLocalFrontier.get('alice')).toBe('sha-alice');
+ });
+ });
+
+ describe('processSyncRequest', () => {
+ it('delegates to SyncProtocol.processSyncRequest with correct args', async () => {
+ const mockResponse = { type: 'sync-response', frontier: {}, patches: [] };
+ processSyncRequestMock.mockResolvedValue(mockResponse);
+ const host = createMockHost({
+ discoverWriters: vi.fn().mockResolvedValue(['alice']),
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue('sha-alice'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+ const request = { type: 'sync-request', frontier: {} };
+
+ const result = await ctrl.processSyncRequest(/** @type {*} */ (request));
+
+ expect(result).toBe(mockResponse);
+ expect(processSyncRequestMock).toHaveBeenCalledWith(
+ request,
+ expect.any(Map),
+ host._persistence,
+ 'test-graph',
+ { codec: host._codec },
+ );
+ });
+ });
+
+ describe('syncWith', () => {
+ it('syncs with a direct peer using direct method calls', async () => {
+ const newState = { observedFrontier: new Map(), nodeAlive: { dots: new Map() }, edgeAlive: { dots: new Map() } };
+ const newFrontier = new Map([['alice', 'sha-a2'], ['bob', 'sha-b1']]);
+ applySyncResponseMock.mockReturnValue({ state: newState, frontier: newFrontier, applied: 2 });
+
+ const peerResponse = {
+ type: 'sync-response',
+ frontier: { bob: 'sha-b1' },
+ patches: [
+ { writerId: 'bob', sha: 'sha-b1', patch: { ops: [] } },
+ ],
+ };
+
+ const remotePeer = {
+ processSyncRequest: vi.fn().mockResolvedValue(peerResponse),
+ getFrontier: vi.fn().mockResolvedValue(new Map([['bob', 'sha-b1']])),
+ };
+
+ const fakeState = {
+ observedFrontier: new Map(),
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ const host = createMockHost({
+ _cachedState: fakeState,
+ _lastFrontier: new Map([['alice', 'sha-a1']]),
+ discoverWriters: vi.fn().mockResolvedValue(['alice']),
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue('sha-a1'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const result = await ctrl.syncWith(/** @type {*} */ (remotePeer));
+
+ expect(result.applied).toBe(2);
+ expect(result.attempts).toBe(1);
+ expect(remotePeer.processSyncRequest).toHaveBeenCalledOnce();
+ expect(host._cachedState).toBe(newState);
+ });
+
+ it('calls host.materialize() when _cachedState is null before apply', async () => {
+ const materializedState = {
+ observedFrontier: new Map(),
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ const newFrontier = new Map([['alice', 'sha-a2']]);
+ applySyncResponseMock.mockReturnValue({ state: materializedState, frontier: newFrontier, applied: 0 });
+
+ const peerResponse = {
+ type: 'sync-response',
+ frontier: {},
+ patches: [],
+ };
+ const remotePeer = {
+ processSyncRequest: vi.fn().mockResolvedValue(peerResponse),
+ getFrontier: vi.fn().mockResolvedValue(new Map()),
+ };
+
+ const host = createMockHost({
+ _cachedState: null,
+ _lastFrontier: null,
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ materialize: vi.fn().mockImplementation(async function () {
+ host._cachedState = materializedState;
+ }),
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ await ctrl.syncWith(/** @type {*} */ (remotePeer));
+
+ expect(host.materialize).toHaveBeenCalledOnce();
+ });
+
+ it('returns state when materialize option is true', async () => {
+ const fakeState = {
+ observedFrontier: new Map(),
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ const newFrontier = new Map([['alice', 'sha-a2']]);
+ applySyncResponseMock.mockReturnValue({ state: fakeState, frontier: newFrontier, applied: 1 });
+
+ const peerResponse = {
+ type: 'sync-response',
+ frontier: {},
+ patches: [],
+ };
+ const remotePeer = {
+ processSyncRequest: vi.fn().mockResolvedValue(peerResponse),
+ getFrontier: vi.fn().mockResolvedValue(new Map()),
+ };
+
+ const host = createMockHost({
+ _cachedState: fakeState,
+ _lastFrontier: new Map(),
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const result = await ctrl.syncWith(/** @type {*} */ (remotePeer), { materialize: true });
+
+ expect(result.state).toBe(fakeState);
+ expect(result.applied).toBe(1);
+ });
+ });
+
+ describe('createSyncRequest', () => {
+ it('returns a sync request with the local frontier', async () => {
+ const host = createMockHost({
+ discoverWriters: vi.fn().mockResolvedValue(['alice']),
+ _persistence: {
+ readRef: vi.fn().mockResolvedValue('sha-alice'),
+ listRefs: vi.fn().mockResolvedValue([]),
+ },
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ const request = await ctrl.createSyncRequest();
+
+ expect(request).toHaveProperty('type', 'sync-request');
+ expect(request).toHaveProperty('frontier');
+ expect(request.frontier).toMatchObject({ alice: 'sha-alice' });
+ });
+ });
+
+ describe('serve', () => {
+ it('throws when port is not a number', async () => {
+ const host = createMockHost();
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ await expect(ctrl.serve(/** @type {*} */ ({ port: 'bad', httpPort: {} })))
+ .rejects.toThrow('serve() requires a numeric port');
+ });
+
+ it('throws when httpPort is missing', async () => {
+ const host = createMockHost();
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ await expect(ctrl.serve(/** @type {*} */ ({ port: 3000 })))
+ .rejects.toThrow('serve() requires an httpPort adapter');
+ });
+
+ it('instantiates HttpSyncServer with correct args', async () => {
+ httpSyncServerMock.mockClear();
+ const host = createMockHost();
+ const ctrl = new SyncController(/** @type {*} */ (host));
+ const httpPort = { listen: vi.fn() };
+
+ await ctrl.serve(/** @type {*} */ ({
+ port: 3000,
+ httpPort,
+ path: '/custom',
+ maxRequestBytes: 1024,
+ }));
+
+ expect(httpSyncServerMock).toHaveBeenCalledOnce();
+ const args = httpSyncServerMock.mock.calls[0][0];
+ expect(args.httpPort).toBe(httpPort);
+ expect(args.path).toBe('/custom');
+ expect(args.maxRequestBytes).toBe(1024);
+ expect(args.host).toBe('127.0.0.1');
+ });
+
+ it('enhances auth config with crypto and logger from host', async () => {
+ httpSyncServerMock.mockClear();
+ const mockCrypto = { subtle: {} };
+ const mockLogger = { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() };
+ const host = createMockHost({
+ _crypto: mockCrypto,
+ _logger: mockLogger,
+ });
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ await ctrl.serve(/** @type {*} */ ({
+ port: 3000,
+ httpPort: { listen: vi.fn() },
+ auth: { keys: { k: 's' } },
+ }));
+
+ const args = httpSyncServerMock.mock.calls[0][0];
+ expect(args.auth.crypto).toBe(mockCrypto);
+ expect(args.auth.logger).toBe(mockLogger);
+ expect(args.auth.keys).toEqual({ k: 's' });
+ });
+
+ it('passes graph host as graph argument to HttpSyncServer', async () => {
+ httpSyncServerMock.mockClear();
+ const host = createMockHost();
+ const ctrl = new SyncController(/** @type {*} */ (host));
+
+ await ctrl.serve(/** @type {*} */ ({
+ port: 3000,
+ httpPort: { listen: vi.fn() },
+ }));
+
+ const args = httpSyncServerMock.mock.calls[0][0];
+ expect(args.graph).toBe(host);
+ });
+ });
+
+ describe('syncWith HTTP path', () => {
+ /** @type {import('vitest').Mock} */
+ let fetchMock;
+ /** @type {ReturnType<typeof createMockHost>} */
+ let host;
+ /** @type {SyncController} */
+ let ctrl;
+
+ beforeEach(() => {
+ fetchMock = vi.fn();
+ vi.stubGlobal('fetch', fetchMock);
+
+ const fakeState = {
+ observedFrontier: new Map(),
+ nodeAlive: { dots: new Map() },
+ edgeAlive: { dots: new Map() },
+ };
+ host = createMockHost({
+ _cachedState: fakeState,
+ _lastFrontier: new Map(),
+ discoverWriters: vi.fn().mockResolvedValue([]),
+ });
+ ctrl = new SyncController(/** @type {*} */ (host));
+
+ // Default: retry calls fn once, timeout passes through
+ retryMock.mockImplementation(async (fn) => await fn());
+ timeoutMock.mockImplementation(async (_ms, fn) => {
+ const ac = new AbortController();
+ return await fn(ac.signal);
+ });
+ });
+
+ afterEach(() => {
+ vi.unstubAllGlobals();
+ });
+
+ it('successful HTTP sync returns applied count', async () => {
+ const validResponse = {
+ type: 'sync-response',
+ frontier: { alice: 'sha-a' },
+ patches: [],
+ };
+ fetchMock.mockResolvedValue({
+ status: 200,
+ json: () => Promise.resolve(validResponse),
+ });
+ const newFrontier = new Map([['alice', 'sha-a']]);
+ applySyncResponseMock.mockReturnValue({
+ state: host._cachedState,
+ frontier: newFrontier,
+ applied: 5,
+ });
+
+ const result = await ctrl.syncWith('http://peer:3000/sync');
+
+ expect(result.applied).toBe(5);
+ expect(applySyncResponseMock).toHaveBeenCalled();
+ });
+
+ it('5xx status throws SyncError with E_SYNC_REMOTE', async () => {
+ fetchMock.mockResolvedValue({ status: 502 });
+
+ await expect(ctrl.syncWith('http://peer:3000/sync'))
+ .rejects.toMatchObject({
+ code: 'E_SYNC_REMOTE',
+ });
+ });
+
+ it('4xx status throws SyncError with E_SYNC_PROTOCOL', async () => {
+ fetchMock.mockResolvedValue({ status: 400 });
+
+ await expect(ctrl.syncWith('http://peer:3000/sync'))
+ .rejects.toMatchObject({
+ code: 'E_SYNC_PROTOCOL',
+ });
+ });
+
+ it('invalid JSON response throws SyncError with E_SYNC_PROTOCOL', async () => {
+ fetchMock.mockResolvedValue({
+ status: 200,
+ json: () => Promise.reject(new SyntaxError('Unexpected token')),
+ });
+
+ await expect(ctrl.syncWith('http://peer:3000/sync'))
+ .rejects.toMatchObject({
+ code: 'E_SYNC_PROTOCOL',
+ });
+ });
+
+ it('AbortError throws OperationAbortedError', async () => {
+ const abortErr = new Error('aborted');
+ abortErr.name = 'AbortError';
+ timeoutMock.mockRejectedValue(abortErr);
+
+ await expect(ctrl.syncWith('http://peer:3000/sync'))
+ .rejects.toBeInstanceOf(OperationAbortedError);
+ });
+
+ it('TimeoutError throws SyncError with E_SYNC_TIMEOUT', async () => {
+ const { TimeoutError } = await import('@git-stunts/alfred');
+ timeoutMock.mockRejectedValue(new TimeoutError(10000, 10001));
+
+ await expect(ctrl.syncWith('http://peer:3000/sync'))
+ .rejects.toMatchObject({
+ code: 'E_SYNC_TIMEOUT',
+ });
+ });
+
+ it('network error throws SyncError with E_SYNC_NETWORK', async () => {
+ timeoutMock.mockImplementation(async (_ms, fn) => {
+ const ac = new AbortController();
+ return await fn(ac.signal);
+ });
+ fetchMock.mockRejectedValue(new TypeError('fetch failed'));
+
+ await expect(ctrl.syncWith('http://peer:3000/sync'))
+ .rejects.toMatchObject({
+ code: 'E_SYNC_NETWORK',
+ });
+ });
+
+ it('shouldRetry: retries on E_SYNC_REMOTE but not E_SYNC_PROTOCOL', async () => {
+ fetchMock.mockResolvedValue({ status: 502 });
+
+ // Capture shouldRetry from retry mock
+ /** @type {((err: unknown) => boolean) | undefined} */
+ let capturedShouldRetry;
+ /** @type {*} */ (retryMock).mockImplementation(async (/** @type {Function} */ fn, /** @type {*} */ opts) => {
+ capturedShouldRetry = opts.shouldRetry;
+ return await fn();
+ });
+
+ try {
+ await ctrl.syncWith('http://peer:3000/sync');
+ } catch {
+ // Expected to throw
+ }
+
+ expect(capturedShouldRetry).toBeDefined();
+ const shouldRetry = /** @type {(err: unknown) => boolean} */ (capturedShouldRetry);
+
+ expect(shouldRetry(new SyncError('remote', { code: 'E_SYNC_REMOTE' }))).toBe(true);
+ expect(shouldRetry(new SyncError('timeout', { code: 'E_SYNC_TIMEOUT' }))).toBe(true);
+ expect(shouldRetry(new SyncError('network', { code: 'E_SYNC_NETWORK' }))).toBe(true);
+ expect(shouldRetry(new SyncError('protocol', { code: 'E_SYNC_PROTOCOL' }))).toBe(false);
+ });
+
+ it('passes auth headers to fetch when auth option provided', async () => {
+ // Provide a crypto mock that supports hash + hmac
+ const mockCrypto = {
+ hash: vi.fn().mockResolvedValue('abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'),
+ hmac: vi.fn().mockResolvedValue('hmac-signature-hex'),
+ };
+ host._crypto = mockCrypto;
+
+ const validResponse = {
+ type: 'sync-response',
+ frontier: {},
+ patches: [],
+ };
+ fetchMock.mockResolvedValue({
+ status: 200,
+ json: () => Promise.resolve(validResponse),
+ });
+ applySyncResponseMock.mockReturnValue({
+ state: host._cachedState,
+ frontier: new Map(),
+ applied: 0,
+ });
+
+ await ctrl.syncWith('http://peer:3000/sync', {
+ auth: { secret: 'test-secret', keyId: 'k1' },
+ });
+
+ expect(fetchMock).toHaveBeenCalledOnce();
+ const fetchHeaders = fetchMock.mock.calls[0][1].headers;
+ // Auth headers should contain x-warp-* prefixed headers
+ const authHeaderKeys = Object.keys(fetchHeaders).filter(k => k.startsWith('x-warp-'));
+ expect(authHeaderKeys.length).toBeGreaterThan(0);
+ });
+ });
+});
diff --git a/test/unit/domain/services/TranslationCost.test.js b/test/unit/domain/services/TranslationCost.test.js
index 6a24288b..e6d23bbd 100644
--- a/test/unit/domain/services/TranslationCost.test.js
+++ b/test/unit/domain/services/TranslationCost.test.js
@@ -32,11 +32,9 @@ function addProp(state, nodeId, key, value) {
}
describe('TranslationCost', () => {
- /** @type {any} */
/** @type {any} */
let mockPersistence;
/** @type {any} */
- /** @type {any} */
let graph;
beforeEach(async () => {
diff --git a/test/unit/domain/services/logging.integration.test.js b/test/unit/domain/services/logging.integration.test.js
index 93a6ca32..1ae4969a 100644
--- a/test/unit/domain/services/logging.integration.test.js
+++ b/test/unit/domain/services/logging.integration.test.js
@@ -37,14 +37,11 @@ function createMockLogger() {
describe('Service Logging Integration', () => {
describe('IndexRebuildService', () => {
- /** @type {any} */
/** @type {any} */
let mockGraphService;
/** @type {any} */
- /** @type {any} */
let mockStorage;
/** @type {any} */
- /** @type {any} */
let mockLogger;
beforeEach(() => {
@@ -57,7 +54,7 @@ describe('Service Logging Integration', () => {
mockStorage = {
writeBlob: vi.fn().mockResolvedValue('blob-oid'),
writeTree: vi.fn().mockResolvedValue('tree-oid'),
- readTreeOids: vi.fn().mockResolvedValue({ 'meta_ab.json': 'meta-oid' }),
+ readTreeOids: vi.fn().mockResolvedValue({ 'meta_ab.json': 'aaa1bbb2ccc3ddd4eee5fff6aaa1bbb2ccc3ddd4' }),
};
mockLogger = createMockLogger();
});
@@ -159,11 +156,9 @@ describe('Service Logging Integration', () => {
});
describe('BitmapIndexReader', () => {
- /** @type {any} */
/** @type {any} */
let mockStorage;
/** @type {any} */
- /** @type {any} */
let mockLogger;
beforeEach(() => {
@@ -190,7 +185,7 @@ describe('Service Logging Integration', () => {
logger: mockLogger,
crypto,
}));
- reader.setup({ 'meta_sh.json': 'blob-oid' });
+ reader.setup({ 'meta_sh.json': 'aaa1bbb2ccc3ddd4eee5fff6aaa7bbb8ccc9ddd0' });
const id = await reader.lookupId('sha123');
diff --git a/test/unit/domain/utils/validateShardOid.test.js b/test/unit/domain/utils/validateShardOid.test.js
new file mode 100644
index 00000000..31d132f8
--- /dev/null
+++ b/test/unit/domain/utils/validateShardOid.test.js
@@ -0,0 +1,58 @@
+import { describe, it, expect } from 'vitest';
+import { isValidShardOid } from '../../../../src/domain/utils/validateShardOid.js';
+
+describe('isValidShardOid', () => {
+ it('accepts valid 40-char hex OID', () => {
+ expect(isValidShardOid('a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2')).toBe(true);
+ });
+
+ it('accepts valid 64-char hex OID', () => {
+ expect(isValidShardOid('a'.repeat(64))).toBe(true);
+ });
+
+ it('accepts valid 4-char hex OID (minimum length)', () => {
+ expect(isValidShardOid('abcd')).toBe(true);
+ });
+
+ it('accepts uppercase hex', () => {
+ expect(isValidShardOid('ABCDEF1234567890ABCDEF1234567890ABCDEF12')).toBe(true);
+ });
+
+ it('rejects empty string', () => {
+ expect(isValidShardOid('')).toBe(false);
+ });
+
+ it('rejects string shorter than 4 chars', () => {
+ expect(isValidShardOid('abc')).toBe(false);
+ });
+
+ it('rejects string longer than 64 chars', () => {
+ expect(isValidShardOid('a'.repeat(65))).toBe(false);
+ });
+
+ it('rejects non-hex characters', () => {
+ expect(isValidShardOid('g1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2')).toBe(false);
+ });
+
+ it('rejects non-string input', () => {
+ expect(isValidShardOid(/** @type {any} */ (123))).toBe(false);
+ expect(isValidShardOid(/** @type {any} */ (null))).toBe(false);
+ expect(isValidShardOid(/** @type {any} */ (undefined))).toBe(false);
+ });
+
+ it('accepts mixed-case hex', () => {
+ expect(isValidShardOid('aAbBcCdD')).toBe(true);
+ });
+
+ it('rejects dash in OID', () => {
+ expect(isValidShardOid('a1b2-c3d4')).toBe(false);
+ });
+
+ it('rejects dot in OID', () => {
+ expect(isValidShardOid('a1b2.c3d4')).toBe(false);
+ });
+
+ it('rejects space in OID', () => {
+ expect(isValidShardOid('a1b2 c3d4')).toBe(false);
+ });
+});