diff --git a/.changeset/validate-schema-create-expert-v2.md b/.changeset/validate-schema-create-expert-v2.md new file mode 100644 index 00000000..c99096cc --- /dev/null +++ b/.changeset/validate-schema-create-expert-v2.md @@ -0,0 +1,6 @@ +--- +"perstack": patch +"@perstack/perstack-toml": patch +--- + +feat: add `perstack validate` and `perstack schema` CLI commands, rewrite create-expert v2.0.0 diff --git a/apps/perstack/bin/cli.ts b/apps/perstack/bin/cli.ts index 9fd4cc11..76f29dee 100755 --- a/apps/perstack/bin/cli.ts +++ b/apps/perstack/bin/cli.ts @@ -14,6 +14,8 @@ import { getPerstackConfig, getPerstackConfigOrDefault, loadLockfile, + printPerstackSchema, + validatePerstackConfigFile, } from "@perstack/perstack-toml" import { applicationsListHandler, @@ -202,6 +204,30 @@ program await installHandler({ configPath, perstackConfig, envPath: options.envPath }) }) +// Validation and schema commands +program + .command("validate") + .description("Validate a perstack.toml file against the schema") + .argument("<path>", "Path to perstack.toml file to validate") + .action(async (pathArg: string) => { + const result = await validatePerstackConfigFile(pathArg) + if (result.valid) { + console.log("Valid") + } else { + for (const error of result.errors) { + console.error(error) + } + process.exit(1) + } + }) + +program + .command("schema") + .description("Print the perstack.toml schema reference") + .action(() => { + console.log(printPerstackSchema()) + }) + // Expert management commands function getParentOptions(cmd: InstanceType<typeof Command>) { const parent = cmd.parent?.opts() as { apiKey?: string; baseUrl?: string } | undefined diff --git a/bun.lock b/bun.lock index 8127847f..0e14fbd6 100644 --- a/bun.lock +++ b/bun.lock @@ -49,7 +49,7 @@ }, "apps/perstack": { "name": "perstack", - "version": "0.0.126", + "version": "0.0.129", "dependencies": { "commander": "^14.0.3", }, @@ -127,6 +127,7 @@ "dependencies": { "@perstack/core": "workspace:*", "smol-toml": "^1.6.0", + "zod": "^4.3.6", },
"devDependencies": { "@tsconfig/node22": "^22.0.5", @@ -305,7 +306,7 @@ }, "packages/runtime": { "name": "@perstack/runtime", - "version": "0.0.137", + "version": "0.0.140", "dependencies": { "@ai-sdk/amazon-bedrock": "^4.0.60", "@ai-sdk/anthropic": "^3.0.44", @@ -377,7 +378,7 @@ }, "packages/tui": { "name": "@perstack/tui", - "version": "0.0.35", + "version": "0.0.36", "dependencies": { "@paralleldrive/cuid2": "^3.3.0", "@perstack/core": "workspace:*", @@ -394,7 +395,7 @@ }, "packages/tui-components": { "name": "@perstack/tui-components", - "version": "0.0.37", + "version": "0.0.38", "dependencies": { "@perstack/core": "workspace:*", "@perstack/log": "workspace:*", diff --git a/definitions/create-expert/perstack.toml b/definitions/create-expert/perstack.toml index 3a742c82..2fe55e7d 100644 --- a/definitions/create-expert/perstack.toml +++ b/definitions/create-expert/perstack.toml @@ -1,30 +1,27 @@ # ============================================================================= # Delegation Tree # -# create-expert — write → verify cycle -# ├── @create-expert/write — perstack.toml authoring (delegates to review) -# │ └── @create-expert/review — soft review (called by write) -# └── @create-expert/verify — test execution + hard signal verification -# └── @create-expert/test — test query execution (called by verify) +# create-expert — plan → write → verify cycle +# ├── @create-expert/plan — context expansion +# ├── @create-expert/write — perstack.toml authoring (uses perstack validate/schema) +# └── @create-expert/verify — functional verification (ONE completion signal) # ============================================================================= # # ============================================================================= # Design Principles # -# 1. Hard Signal Design -# - Must: ONE signal that answers "is the core task done?" Binary pass/fail. -# Always blocks sign-off when it fails. 
If the task is a work request, -# the signal checks whether the work is complete. If a deliverable is -# requested, the signal checks whether the deliverable is usable. -# - Should: Multiple signals for user value validation. Each checks a -# specific quality aspect. Define a sign-off threshold (e.g., "sign off -# if ≤ N failures out of M"). -# - Every signal: a command with a deterministic expected result. +# 1. Context Expansion +# - User queries are short. Without expanding context through information +# gathering and prior knowledge, output is superficial. +# - Plan is the most important phase: it maximizes context before any +# authoring begins. # -# 2. Soft Signal as Direct Delegation -# - Executors can delegate to reviewers directly (e.g., write delegates -# to review for feedback). Recommended pattern for quality checks that -# require LLM judgment. +# 2. Hard Signal Verification +# - Completion is gated by ONE hard signal: binary, deterministic, +# unambiguous. Answers "does the expert work?" +# - Multiple completion criteria cause infinite loops. +# - Structural correctness is validated by `perstack validate` (CLI tool), +# not by LLM judgment. # # 3. Domain Agnosticism # - create-expert produces experts for ANY domain — not just software. @@ -41,7 +38,7 @@ [experts."create-expert"] defaultModelTier = "high" -version = "1.0.25" +version = "2.0.0" description = "Creates and modifies Perstack expert definitions in perstack.toml" instruction = """ You create and modify Perstack expert definitions. perstack.toml is the single deliverable. @@ -53,25 +50,25 @@ You create and modify Perstack expert definitions. perstack.toml is the single d ## Delegates -- @create-expert/write — produces perstack.toml (with embedded test spec). Delegates to review internally. -- @create-expert/verify — runs the test query and hard signal checks from perstack.toml header. 
+- @create-expert/plan — expands the user's request into a comprehensive plan (context expansion) +- @create-expert/write — produces perstack.toml using plan.md and CLI validation tools +- @create-expert/verify — runs the expert with a test query and checks the completion signal ## Flow -1. Delegate to write: pass the user's request (+ existing perstack.toml path if Update mode) -2. Delegate to verify: pass the perstack.toml path and coordinator expert name -3. If verify returns CONTINUE (must signal failed): delegate to write with failure feedback, restart from step 2 -4. If verify returns PASS: attemptCompletion with verification evidence +1. Delegate to plan: pass the user's request (+ existing perstack.toml path if Update mode) +2. Delegate to write: pass plan.md path (+ existing perstack.toml path if Update mode) +3. Delegate to verify: pass perstack.toml path, plan.md path, and the coordinator expert name +4. If verify returns CONTINUE: delegate to write with verify's feedback + plan.md path, restart from step 3 +5. If verify returns PASS: attemptCompletion with verification evidence ### Maximum 3 iterations -If must signal has not passed after 3 iterations, report what passed, what failed, and stop. +If the completion signal has not passed after 3 iterations, report what happened and stop. 
### One delegate call per response - -### Guardrails -- Do NOT delete perstack.toml """ delegates = [ + "@create-expert/plan", "@create-expert/write", "@create-expert/verify", ] @@ -84,116 +81,121 @@ packageName = "@perstack/base" pick = ["readTextFile", "exec", "attemptCompletion"] # ============================================================================= -# write — perstack.toml Author +# plan — Context Expansion # ============================================================================= -[experts."@create-expert/write"] +[experts."@create-expert/plan"] defaultModelTier = "high" -version = "1.0.25" +version = "2.0.0" description = """ -Produces perstack.toml from the user's request. The file includes an embedded test spec in the header comments. -Provide: (1) the user's request, (2) optionally path to existing perstack.toml, (3) optionally verification failure feedback. +Expands the user's request into a comprehensive plan.md through information gathering and prior knowledge. +Provide: (1) the user's request, (2) optionally path to existing perstack.toml. +Returns: path to plan.md. """ instruction = """ -You produce perstack.toml from the user's request. The file is self-documenting: it contains both the expert definitions and an embedded test spec in the header comments. +You are the context expander. The user's query is short — your job is to maximize context before any authoring begins. Investigate the workspace, analyze requirements, and produce plan.md. -Delegate to @create-expert/review for quality feedback before finalizing. +Read existing perstack.toml (if provided) and relevant workspace files to understand the domain before writing the plan. -## perstack.toml Structure +## Output: plan.md -The file has three comment blocks at the top, followed by expert definitions: +### Expert Purpose +One paragraph: what it does, for whom, what makes it different from a generic attempt. 
-``` -# === Delegation Tree === -# (ASCII diagram of the expert tree) +### Domain Knowledge +Constraints and rules the LLM cannot derive on its own, extracted from the user's request and workspace exploration. Every word choice is a signal — "polished" means no placeholders, "well-tested" means automated testing, "run anywhere" means cross-platform. Preserve library or tool names the user explicitly specified; do not add recommendations beyond what the user requested. -# === Test Spec === -# Test Query: (one realistic query exercising the expert) -# -# Must Signal: -# Command: (exact command) -# Expected: (expected result) -# -# Should Signals (sign off if ≤ N of M fail): -# 1. Command: ... -# Expected: ... -# 2. Command: ... -# Expected: ... - -# === Design Principles === -# (if the expert has design principles worth documenting) +### Use Cases +2-3 concrete scenarios: who uses this expert, what they ask for, what success looks like. -[experts."expert-name"] -... -``` +### Expert Architecture +Delegation tree with role assignments. If the user specified a team name, use it exactly as the coordinator name. Delegate names describe function, not persona (/test, /verify, not /tester). For each expert: name, one-line purpose, role only. -## Verification Signal Design +### Test Query +One comprehensive, realistic query that exercises the expert's full capability. -There are two kinds of verification signal: +### Completion Signal +ONE hard signal that answers "does the expert work?" This signal must be: +- **Binary**: pass or fail, no partial credit +- **Deterministic**: same input always produces the same verdict +- **Essential**: tests what matters to the user, not implementation details -**Hard signal**: a command with a deterministic expected result. Same input always produces the same verdict. No LLM judgment involved. Used for verification that must be trustworthy and reproducible. 
+Format: +- Command: (the exact command to run) +- Expected: (the expected result) -**Soft signal**: LLM reads an artifact and judges its quality. Useful for semantic checks (instruction clarity, constraint coverage) but non-deterministic. Implemented as direct delegation to a reviewer expert. +If the signal cannot be expressed as a command with a deterministic expected result, rethink the signal or the expert design until it can. -Hard signals have two priority levels: -- **Must**: ONE signal. Answers "is the core task done?" — blocks sign-off when it fails. -- **Should**: multiple signals. Each checks a user-value aspect. Define a sign-off threshold (e.g., "≤ 1 of 4 fail"). - -Every hard signal is: run X → expect Y. No soft language ("verify that", "looks correct"). - -## perstack.toml Schema - -```toml -[experts."expert-name"] -version = "1.0.0" -defaultModelTier = "middle" # REQUIRED: "low" | "middle" | "high" -description = "What it does, when to use, what to provide" -instruction = \"\"\"Domain constraints only.\"\"\" -delegates = ["@expert-name/delegate"] # REQUIRED for coordinators +attemptCompletion with the path to plan.md. +""" -[experts."expert-name".skills."@perstack/base"] +[experts."@create-expert/plan".skills."@perstack/base"] type = "mcpStdioSkill" +description = "File operations, command execution, and task management" command = "npx" packageName = "@perstack/base" -pick = ["readTextFile", "exec", "attemptCompletion"] -``` +pick = [ + "readTextFile", + "writeTextFile", + "exec", + "todo", + "attemptCompletion", +] -Allowed fields per expert: version, defaultModelTier, description, instruction, delegates (coordinators only). Do NOT add fields not shown above (no `instructionFile`, no `defaultSkill`, no custom fields). 
+# ============================================================================= +# write — perstack.toml Author +# ============================================================================= -Valid defaultModelTier values: `"low"`, `"middle"`, `"high"`. No abbreviations. +[experts."@create-expert/write"] +defaultModelTier = "high" +version = "2.0.0" +description = """ +Produces perstack.toml from plan.md using CLI validation tools. +Provide: (1) path to plan.md, (2) optionally path to existing perstack.toml, (3) optionally verification failure feedback. +""" +instruction = """ +You produce perstack.toml from plan.md. Use CLI tools for schema reference and validation — do not guess field names or values. -Valid tool names for pick: `readTextFile`, `writeTextFile`, `editTextFile`, `exec`, `todo`, `attemptCompletion`, `addDelegateFromConfig`, `addDelegate`, `removeDelegate`. No other names are valid. +## Workflow -## Instruction Quality +1. Read plan.md to understand the full requirements +2. Run `perstack schema` to learn valid fields, types, and tool names +3. Write perstack.toml with the delegation tree header and test spec header +4. Run `perstack validate ./perstack.toml` and fix any errors until it passes +5. attemptCompletion with the path to perstack.toml -Instructions should contain ONLY what the LLM cannot derive on its own: domain-specific constraints, quality bars, anti-patterns, completion criteria, priority rules. +## perstack.toml Header Structure -Do NOT put in instructions: implementation procedures, specific command flags, exit code specifications, file paths, data schemas, or step-by-step build instructions. The LLM knows how to implement — tell it what constraints to satisfy, not how to work. +The file starts with two comment blocks, followed by expert definitions: -Shorter instructions outperform longer ones. Every line must earn its place. 
+``` +# === Delegation Tree === +# (ASCII diagram of the expert tree) + +# === Test Spec === +# Test Query: (from plan.md) +# Completion Signal: +# Command: (from plan.md) +# Expected: (from plan.md) -## Writing Rules +[experts."expert-name"] +... +``` -- Coordinators = kebab-case, delegates = `@coordinator/delegate-name` -- If the user specified a team name, use it exactly as the coordinator name -- Delegate names: what they do, not who they are (`/test`, `/verify`, not `/tester`) -- Every coordinator MUST have a `delegates` array — without it, delegation silently fails -- Always set `defaultModelTier` and explicit `pick` list per expert -- Skill key MUST be exactly `"@perstack/base"` +## Instruction Quality -## Signal Design for Generated Experts +Instructions should contain ONLY what the LLM cannot derive on its own: domain-specific constraints, quality bars, anti-patterns, completion criteria. Do NOT include implementation procedures, specific command flags, file paths, data schemas, or step-by-step instructions. -Apply the same signal design to experts you generate: -- Hard signals: verifier expert with `exec`, direct child of coordinator. Must (one) + Should (multiple, with threshold). -- Soft signals: executor experts can delegate to reviewer experts directly for quality feedback. +Shorter instructions outperform longer ones. Every line must earn its place. ## Preservation Rule If an existing perstack.toml was provided, preserve ALL existing expert definitions — only add or modify experts for the current request. -attemptCompletion with the path to perstack.toml. +## When Handling Verification Feedback + +Read plan.md for context, then make targeted modifications to perstack.toml to address the specific issues. Do not rewrite the entire file unless the feedback indicates systemic problems. 
""" -delegates = ["@create-expert/review"] [experts."@create-expert/write".skills."@perstack/base"] type = "mcpStdioSkill" @@ -210,156 +212,50 @@ pick = [ ] # ============================================================================= -# review — Soft Quality Review (called by write) -# ============================================================================= - -[experts."@create-expert/review"] -defaultModelTier = "low" -version = "1.0.25" -description = """ -Reviews perstack.toml for instruction quality and signal design. -Provide: (1) path to perstack.toml, (2) the user's original request. -Returns: PASS or CONTINUE with per-item ✓/✗ results. -""" -instruction = """ -You review perstack.toml quality. Read the file, then check each item below. Mark each ✓ or ✗ with a one-line reason. - -## Checklist - -### Structural correctness -- Every expert has: version, defaultModelTier, description, instruction, skills? (✓/✗) -- defaultModelTier values are exactly "low", "middle", or "high"? (✓/✗) -- Tool names in pick lists are from the valid set (readTextFile, writeTextFile, editTextFile, exec, todo, attemptCompletion, addDelegateFromConfig, addDelegate, removeDelegate)? (✓/✗) -- No unknown fields (no instructionFile, defaultSkill, or custom fields)? (✓/✗) -- Every coordinator has a delegates array? (✓/✗) - -### Instruction quality -For library/tool names in any instruction: -- Specified by the user in the original request? 
(✓/✗ per name) - -For each non-coordinator instruction, flag (✗) if any of the following appear: -- Output locations or structure — dictates where to place results or enumerates artifacts -- Internal APIs of specified tools — names specific methods, hooks, or functions -- Named alternatives to reject — specifying what to use implicitly excludes alternatives -- Implementation approach — describes how to build rather than what constraints to satisfy -- Code snippets, data schemas, or step-by-step procedures - -### Test spec -- Must signal: exactly one, checks core task completion? (✓/✗) -- Should signals: each is a deterministic command with expected result? (✓/✗) -- Sign-off threshold defined? (✓/✗) - -## Verdicts - -- **PASS** — all items ✓. -- **CONTINUE** — any item ✗. List each with: what's wrong, specific fix. - -attemptCompletion with: verdict and per-item results. -""" - -[experts."@create-expert/review".skills."@perstack/base"] -type = "mcpStdioSkill" -description = "File reading and task completion" -command = "npx" -packageName = "@perstack/base" -pick = ["readTextFile", "todo", "attemptCompletion"] - -# ============================================================================= -# verify — Test Execution + Hard Signal Verification +# verify — Functional Verification # ============================================================================= [experts."@create-expert/verify"] -defaultModelTier = "low" -version = "1.0.25" +defaultModelTier = "high" +version = "2.0.0" description = """ -Runs the test query via @create-expert/test, then executes hard signal checks from the perstack.toml header. -Provide: (1) path to perstack.toml, (2) the coordinator expert name to test. -Returns: PASS (must signal passes) or CONTINUE (must signal failed). +Runs the expert with a test query and checks the ONE completion signal. +Provide: (1) path to perstack.toml, (2) path to plan.md, (3) the coordinator expert name. +Returns: PASS or CONTINUE. 
""" instruction = """ -You run the test and verify the results. Two phases: - -## Phase 1: Run Test - -Read the test spec from the perstack.toml header comments to extract the test query. Delegate to @create-expert/test with: perstack.toml path, the test query, and the coordinator expert name. Note the work directory path returned by test. - -## Phase 2: Execute Hard Signals - -After test completes, execute the verification signals from the perstack.toml header. Run signal commands in the work directory reported by test. - -You do NOT read produced artifacts. You do NOT review content, quality, or style. Your only inputs are command outputs and their expected results. +You verify that the expert actually works. Read plan.md to get the test query and completion signal. -### Must Signal -Run the must signal command. Compare result against expected output. -- PASS → proceed to should signals -- FAIL → CONTINUE (report command, expected, actual) +## Workflow -### Should Signals -Run each should signal command. Record per signal: command, expected, actual, PASS/FAIL. -Compare total failures against the sign-off threshold. - -### Reproducibility -Re-run the must signal. Compare with first result. -- Identical → deterministic -- Different → CONTINUE (non-deterministic) +1. Read plan.md for the test query and completion signal +2. Create a dedicated work directory for this test run +3. Load the expert from perstack.toml using addDelegateFromConfig +4. Run the test query against the expert +5. removeDelegate to unload the expert +6. Execute the completion signal command in the work directory +7. Compare the result against the expected output +8. Append your verification result to plan.md (command run, expected, actual, PASS/FAIL) ## Verdicts -- **PASS** — must signal passes and reproduces. Should signal results reported with counts vs threshold. -- **CONTINUE** — must signal failed or did not reproduce. 
Include: command, expected, actual, and a fix recommendation **for perstack.toml** (not for the produced artifacts). The deliverable being iterated is the expert definition, not the test output. - -Should signal failures beyond threshold are reported as known limitations but do NOT cause CONTINUE — only the must signal blocks. - -attemptCompletion with: verdict, must signal result, should signal results, reproducibility result, and (if CONTINUE) fix feedback targeting perstack.toml. -""" -delegates = ["@create-expert/test"] - -[experts."@create-expert/verify".skills."@perstack/base"] -type = "mcpStdioSkill" -description = "File operations and task completion" -command = "npx" -packageName = "@perstack/base" -pick = ["readTextFile", "exec", "todo", "attemptCompletion"] - -# ============================================================================= -# test — Test Query Executor (called by verify) -# ============================================================================= - -[experts."@create-expert/test"] -defaultModelTier = "low" -version = "1.0.25" -description = """ -Executes a test query against a Perstack expert and reports what happened. -Provide: (1) path to perstack.toml, (2) the test query, (3) the coordinator expert name. -Returns: factual report. Does not evaluate pass/fail. -""" -instruction = """ -Run a test query against an expert and report exactly what happened. Do NOT evaluate or judge the result. +- **PASS**: completion signal passed. Report the evidence. +- **CONTINUE**: completion signal failed. Report: command, expected, actual, and a fix recommendation targeting perstack.toml (the expert definition), not the produced artifacts. You can ONLY delegate to coordinators (plain names like "game-dev"), NOT to delegates (names starting with @). -1. Create a dedicated work directory for this test run (e.g., test-run-1) -2. Read perstack.toml to understand the expert structure -3. Use addDelegateFromConfig to add the coordinator as a delegate -4. 
Call the coordinator with the test query -5. removeDelegate to unload the expert - -NEVER delete or modify perstack.toml. By running the expert in a separate work directory, perstack.toml in the parent directory is naturally isolated from the expert's file operations. - -attemptCompletion with: -- **Query**: the test query executed -- **Work directory**: the path where the expert produced its output -- **Produced**: files created/modified, outputs returned, actions taken -- **Errors**: any failures (if none, state "none") +NEVER delete or modify perstack.toml. """ -[experts."@create-expert/test".skills."@perstack/base"] +[experts."@create-expert/verify".skills."@perstack/base"] type = "mcpStdioSkill" description = "File operations, execution, and delegation management" command = "npx" packageName = "@perstack/base" pick = [ "readTextFile", + "writeTextFile", + "editTextFile", "exec", "todo", "attemptCompletion", diff --git a/packages/perstack-toml/package.json b/packages/perstack-toml/package.json index ff23c8a3..b341103f 100644 --- a/packages/perstack-toml/package.json +++ b/packages/perstack-toml/package.json @@ -29,7 +29,8 @@ }, "dependencies": { "@perstack/core": "workspace:*", - "smol-toml": "^1.6.0" + "smol-toml": "^1.6.0", + "zod": "^4.3.6" }, "devDependencies": { "@tsconfig/node22": "^22.0.5", diff --git a/packages/perstack-toml/src/config.ts b/packages/perstack-toml/src/config.ts index 7610e7d8..ce070807 100644 --- a/packages/perstack-toml/src/config.ts +++ b/packages/perstack-toml/src/config.ts @@ -1,6 +1,8 @@ import { readFile } from "node:fs/promises" import path from "node:path" import { + expertSchema, + formatZodError, type PerstackConfig, PerstackError, parseWithFriendlyError, @@ -98,3 +100,63 @@ async function findConfigPathRecursively(cwd: string): Promise { return await findConfigPathRecursively(path.dirname(cwd)) } } + +export async function validatePerstackConfigFile( + filePath: string, +): Promise<{ valid: true } | { valid: false; errors: 
string[] }> { + const errors: string[] = [] + + // Read file + let content: string + try { + content = await readFile(path.resolve(process.cwd(), filePath), "utf-8") + } catch { + return { valid: false, errors: [`File not found: ${filePath}`] } + } + + // Parse TOML + let toml: Record<string, unknown> + try { + toml = TOML.parse(content) as Record<string, unknown> + } catch (e) { + const message = e instanceof Error ? e.message : String(e) + return { valid: false, errors: [`TOML parse error: ${message}`] } + } + + // Validate against config schema + const configResult = perstackConfigSchema.safeParse(toml) + if (!configResult.success) { + errors.push(formatZodError(configResult.error)) + return { valid: false, errors } + } + + // Validate each expert against expertSchema (catches delegation scope errors) + const config = configResult.data + if (config.experts) { + for (const [name, expert] of Object.entries(config.experts)) { + const expertResult = expertSchema.safeParse({ + key: name, + name, + version: expert.version ??
"1.0.0", + description: expert.description, + instruction: expert.instruction, + skills: expert.skills, + delegates: expert.delegates, + tags: expert.tags, + defaultModelTier: expert.defaultModelTier, + minRuntimeVersion: expert.minRuntimeVersion, + providerTools: expert.providerTools, + providerSkills: expert.providerSkills, + providerToolOptions: expert.providerToolOptions, + }) + if (!expertResult.success) { + errors.push(`Expert "${name}": ${formatZodError(expertResult.error)}`) + } + } + } + + if (errors.length > 0) { + return { valid: false, errors } + } + return { valid: true } +} diff --git a/packages/perstack-toml/src/index.ts b/packages/perstack-toml/src/index.ts index f91df4db..4cec7a59 100644 --- a/packages/perstack-toml/src/index.ts +++ b/packages/perstack-toml/src/index.ts @@ -3,5 +3,7 @@ export { getPerstackConfig, getPerstackConfigOrDefault, parsePerstackConfig, + validatePerstackConfigFile, } from "./config.js" export { findLockfile, generateLockfileToml, loadLockfile } from "./lockfile.js" +export { printPerstackSchema } from "./schema-printer.js" diff --git a/packages/perstack-toml/src/schema-printer.ts b/packages/perstack-toml/src/schema-printer.ts new file mode 100644 index 00000000..06a31b08 --- /dev/null +++ b/packages/perstack-toml/src/schema-printer.ts @@ -0,0 +1,131 @@ +import { perstackConfigSchema } from "@perstack/core" +import { z } from "zod" + +interface JsonSchemaProperty { + type?: string | string[] + const?: string + enum?: string[] + items?: JsonSchemaProperty + properties?: Record + required?: string[] + oneOf?: JsonSchemaProperty[] + additionalProperties?: JsonSchemaProperty | boolean + format?: string + pattern?: string + minimum?: number + maximum?: number + minLength?: number + maxLength?: number +} + +function formatType(prop: JsonSchemaProperty): string { + if (prop.const) return `"${prop.const}"` + if (prop.enum) return prop.enum.map((v) => `"${v}"`).join(" | ") + if (prop.oneOf) { + const types = 
prop.oneOf.map(formatType).filter(Boolean) + return types.join(" | ") + } + if (prop.type === "array" && prop.items) { + return `${formatType(prop.items)}[]` + } + if (Array.isArray(prop.type)) { + return prop.type.join(" | ") + } + return prop.type ?? "any" +} + +function formatConstraints(prop: JsonSchemaProperty): string { + const parts: string[] = [] + if (prop.minLength !== undefined) parts.push(`min ${prop.minLength}`) + if (prop.maxLength !== undefined) parts.push(`max ${prop.maxLength}`) + if (prop.minimum !== undefined) parts.push(`min ${prop.minimum}`) + if (prop.maximum !== undefined) parts.push(`max ${prop.maximum}`) + if (prop.pattern) parts.push(`pattern: ${prop.pattern}`) + return parts.length > 0 ? ` (${parts.join(", ")})` : "" +} + +function printObjectFields( + properties: Record<string, JsonSchemaProperty>, + required: string[], + indent = "", +): string { + const lines: string[] = [] + for (const [name, prop] of Object.entries(properties)) { + const isRequired = required.includes(name) + const reqLabel = isRequired ? "required" : "optional" + const type = formatType(prop) + const constraints = formatConstraints(prop) + lines.push(`${indent}${name.padEnd(24)} ${type.padEnd(20)} (${reqLabel})${constraints}`) + } + return lines.join("\n") +} + +export function printPerstackSchema(): string { + const jsonSchema = z.toJSONSchema(perstackConfigSchema, { + unrepresentable: "any", + }) as JsonSchemaProperty + + const sections: string[] = [] + + sections.push("# perstack.toml Schema Reference") + sections.push("") + + // Top-level fields + if (jsonSchema.properties) { + sections.push("## Top-level fields") + sections.push("") + const topLevel = { ...jsonSchema.properties } + delete topLevel.experts // handled separately + sections.push(printObjectFields(topLevel, jsonSchema.required ??
[])) + sections.push("") + } + + // Expert fields + const expertsSchema = jsonSchema.properties?.experts + const expertValueSchema = expertsSchema?.additionalProperties as JsonSchemaProperty | undefined + if (expertValueSchema?.properties) { + sections.push('## Expert definition: [experts."expert-name"]') + sections.push("") + sections.push(printObjectFields(expertValueSchema.properties, expertValueSchema.required ?? [])) + sections.push("") + + // Skill types + const skillsSchema = expertValueSchema.properties.skills + const skillValueSchema = skillsSchema?.additionalProperties as JsonSchemaProperty | undefined + if (skillValueSchema?.oneOf) { + sections.push('## Skill types: [experts."name".skills."skill-name"]') + sections.push("") + for (const variant of skillValueSchema.oneOf) { + if (variant.properties?.type?.const) { + sections.push(`### ${variant.properties.type.const}`) + sections.push("") + sections.push(printObjectFields(variant.properties, variant.required ?? [], " ")) + sections.push("") + } + } + } + } + + // Supplementary info not in Zod schema + sections.push("## @perstack/base available tools") + sections.push("") + sections.push("Basic tools:") + sections.push(" readTextFile, readImageFile, readPdfFile, writeTextFile, editTextFile,") + sections.push(" exec, todo, clearTodo, attemptCompletion") + sections.push("") + sections.push("Skill & delegation management tools:") + sections.push(" addSkill, removeSkill, addDelegate, removeDelegate,") + sections.push(" createExpert, addDelegateFromConfig") + sections.push("") + + sections.push("## Naming rules") + sections.push("") + sections.push('- Coordinators: kebab-case (e.g., "my-expert")') + sections.push('- Delegates: @coordinator/name (e.g., "@my-expert/verify")') + sections.push("- No self-delegation") + sections.push("- Delegates must be in-scope (same coordinator prefix)") + sections.push("- Delegates cannot delegate to their own coordinator") + sections.push("") + + return sections.join("\n") +}