diff --git a/src/index.ts b/src/index.ts index af816d2..bd8d851 100644 --- a/src/index.ts +++ b/src/index.ts @@ -62,6 +62,7 @@ import { import { CONTEXT_RESOURCE_URI, isContextResourceUri } from './resources/uri.js'; import { assessSearchQuality } from './core/search-quality.js'; import { findSymbolReferences } from './core/symbol-references.js'; +import { TOOLS, dispatchTool, type ToolContext } from './tools/index.js'; analyzerRegistry.register(new AngularAnalyzer()); analyzerRegistry.register(new GenericAnalyzer()); @@ -195,268 +196,6 @@ const server: Server = new Server( } ); -const TOOLS: Tool[] = [ - { - name: 'search_codebase', - description: - 'Search the indexed codebase. Returns ranked results and a searchQuality confidence summary. ' + - 'IMPORTANT: Pass the intent="edit"|"refactor"|"migrate" to get preflight: edit readiness check with evidence gating.', - inputSchema: { - type: 'object', - properties: { - query: { - type: 'string', - description: 'Natural language search query' - }, - intent: { - type: 'string', - enum: ['explore', 'edit', 'refactor', 'migrate'], - description: - 'Optional. Use "edit", "refactor", or "migrate" to get the full preflight card before making changes.' - }, - limit: { - type: 'number', - description: 'Maximum number of results to return (default: 5)', - default: 5 - }, - includeSnippets: { - type: 'boolean', - description: - 'Include code snippets in results (default: false). 
If you need code, prefer read_file instead.', - default: false - }, - filters: { - type: 'object', - description: 'Optional filters', - properties: { - framework: { - type: 'string', - description: 'Filter by framework (angular, react, vue)' - }, - language: { - type: 'string', - description: 'Filter by programming language' - }, - componentType: { - type: 'string', - description: 'Filter by component type (component, service, directive, etc.)' - }, - layer: { - type: 'string', - description: - 'Filter by architectural layer (presentation, business, data, state, core, shared)' - }, - tags: { - type: 'array', - items: { type: 'string' }, - description: 'Filter by tags' - } - } - } - }, - required: ['query'] - } - }, - { - name: 'get_codebase_metadata', - description: - 'Get codebase metadata including framework information, dependencies, architecture patterns, ' + - 'and project statistics.', - inputSchema: { - type: 'object', - properties: {} - } - }, - { - name: 'get_indexing_status', - description: - 'Get current indexing status: state, statistics, and progress. ' + - 'Use refresh_index to manually trigger re-indexing when needed.', - inputSchema: { - type: 'object', - properties: {} - } - }, - { - name: 'refresh_index', - description: - 'Re-index the codebase. Supports full re-index or incremental mode. ' + - 'Use incrementalOnly=true to only process files changed since last index.', - inputSchema: { - type: 'object', - properties: { - reason: { - type: 'string', - description: 'Reason for refreshing the index (for logging)' - }, - incrementalOnly: { - type: 'boolean', - description: - 'If true, only re-index files changed since last full index (faster). 
Default: false (full re-index)' - } - } - } - }, - - { - name: 'get_style_guide', - description: 'Query style guide rules and architectural patterns from project documentation.', - inputSchema: { - type: 'object', - properties: { - query: { - type: 'string', - description: - 'Query for specific style guide rules (e.g., "component naming", "service patterns")' - }, - category: { - type: 'string', - description: 'Filter by category (naming, structure, patterns, testing)' - } - } - } - }, - { - name: 'get_team_patterns', - description: - 'Get actionable team pattern recommendations based on codebase analysis. ' + - 'Returns consensus patterns for DI, state management, testing, library wrappers, etc.', - inputSchema: { - type: 'object', - properties: { - category: { - type: 'string', - description: 'Pattern category to retrieve', - enum: ['all', 'di', 'state', 'testing', 'libraries'] - } - } - } - }, - { - name: 'get_symbol_references', - description: - 'Find concrete references to a symbol in indexed chunks. Returns total usageCount and top usage snippets.', - inputSchema: { - type: 'object', - properties: { - symbol: { - type: 'string', - description: - 'Symbol name to find references for (for example: parseConfig or UserService)' - }, - limit: { - type: 'number', - description: 'Maximum number of usage snippets to return (default: 10)', - default: 10 - } - }, - required: ['symbol'] - } - }, - { - name: 'get_component_usage', - description: - 'Find WHERE a library or component is used in the codebase. ' + - "This is 'Find Usages' - returns all files that import a given package/module. 
" + - "Example: get_component_usage('@mycompany/utils') -> shows all files using it.", - inputSchema: { - type: 'object', - properties: { - name: { - type: 'string', - description: - "Import source to find usages for (e.g., 'primeng/table', '@mycompany/ui/button', 'lodash')" - } - }, - required: ['name'] - } - }, - { - name: 'detect_circular_dependencies', - description: - 'Analyze the import graph to detect circular dependencies between files. ' + - 'Circular dependencies can cause initialization issues, tight coupling, and maintenance problems. ' + - 'Returns all detected cycles sorted by length (shorter cycles are often more problematic).', - inputSchema: { - type: 'object', - properties: { - scope: { - type: 'string', - description: - "Optional path prefix to limit analysis (e.g., 'src/features', 'libs/shared')" - } - } - } - }, - { - name: 'remember', - description: - 'CALL IMMEDIATELY when user explicitly asks to remember/record something.\n\n' + - 'USER TRIGGERS:\n' + - '- "Remember this: [X]"\n' + - '- "Record this: [Y]"\n' + - '- "Save this for next time: [Z]"\n\n' + - 'DO NOT call unless user explicitly requests it.\n\n' + - 'HOW TO WRITE:\n' + - '- ONE convention per memory (if user lists 5 things, call this 5 times)\n' + - '- memory: 5-10 words (the specific rule)\n' + - '- reason: 1 sentence (why it matters)\n' + - '- Skip: one-time features, code examples, essays', - inputSchema: { - type: 'object', - properties: { - type: { - type: 'string', - enum: ['convention', 'decision', 'gotcha', 'failure'], - description: - 'Type of memory being recorded. Use "failure" for things that were tried and failed - ' + - 'prevents repeating the same mistakes.' 
- }, - category: { - type: 'string', - description: 'Broader category for filtering', - enum: ['tooling', 'architecture', 'testing', 'dependencies', 'conventions'] - }, - memory: { - type: 'string', - description: 'What to remember (concise)' - }, - reason: { - type: 'string', - description: 'Why this matters or what breaks otherwise' - } - }, - required: ['type', 'category', 'memory', 'reason'] - } - }, - { - name: 'get_memory', - description: - 'Retrieves team conventions, architectural decisions, and known gotchas.\n' + - 'CALL BEFORE suggesting patterns, libraries, or architecture.\n\n' + - 'Filters: category (tooling/architecture/testing/dependencies/conventions), type (convention/decision/gotcha), query (keyword search).', - inputSchema: { - type: 'object', - properties: { - category: { - type: 'string', - description: 'Filter by category', - enum: ['tooling', 'architecture', 'testing', 'dependencies', 'conventions'] - }, - type: { - type: 'string', - description: 'Filter by memory type', - enum: ['convention', 'decision', 'gotcha', 'failure'] - }, - query: { - type: 'string', - description: 'Keyword search across memory and reason' - } - } - } - } -]; - server.setRequestHandler(ListToolsRequestSchema, async () => { return { tools: TOOLS }; }); @@ -748,1373 +487,13 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { const { name, arguments: args } = request.params; try { - switch (name) { - case 'search_codebase': { - const { query, limit, filters, intent, includeSnippets } = args as any; - const queryStr = typeof query === 'string' ? query.trim() : ''; - - if (!queryStr) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - errorCode: 'invalid_params', - message: "Invalid params: 'query' is required and must be a non-empty string.", - hint: "Provide a query like 'how are routes configured' or 'AlbumApiService'." 
- }, - null, - 2 - ) - } - ], - isError: true - }; - } - - if (indexState.status === 'indexing') { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'indexing', - message: 'Index is still being built. Retry in a moment.', - progress: indexState.indexer?.getProgress() - }, - null, - 2 - ) - } - ] - }; - } - - if (indexState.status === 'error') { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: `Indexing failed: ${indexState.error}` - }, - null, - 2 - ) - } - ] - }; - } - - const searcher = new CodebaseSearcher(ROOT_PATH); - let results: SearchResult[]; - const searchProfile = - intent && ['explore', 'edit', 'refactor', 'migrate'].includes(intent) - ? intent - : 'explore'; - - try { - results = await searcher.search(queryStr, limit || 5, filters, { - profile: searchProfile - }); - } catch (error) { - if (error instanceof IndexCorruptedError) { - console.error('[Auto-Heal] Index corrupted. Triggering full re-index...'); - - await performIndexing(); - - if (indexState.status === 'ready') { - console.error('[Auto-Heal] Success. Retrying search...'); - const freshSearcher = new CodebaseSearcher(ROOT_PATH); - try { - results = await freshSearcher.search(queryStr, limit || 5, filters, { - profile: searchProfile - }); - } catch (retryError) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: `Auto-heal retry failed: ${ - retryError instanceof Error ? 
retryError.message : String(retryError) - }` - }, - null, - 2 - ) - } - ] - }; - } - } else { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: `Auto-heal failed: Indexing ended with status '${indexState.status}'`, - error: indexState.error - }, - null, - 2 - ) - } - ] - }; - } - } else { - throw error; // Propagate unexpected errors - } - } - - // Load memories for keyword matching, enriched with confidence - const allMemories = await readMemoriesFile(PATHS.memory); - const allMemoriesWithConf = withConfidence(allMemories); - - const queryTerms = queryStr.toLowerCase().split(/\s+/).filter(Boolean); - const relatedMemories = allMemoriesWithConf - .filter((m) => { - const searchText = `${m.memory} ${m.reason}`.toLowerCase(); - return queryTerms.some((term: string) => searchText.includes(term)); - }) - .sort((a, b) => b.effectiveConfidence - a.effectiveConfidence); - - // Load intelligence data for enrichment (all intents, not just preflight) - let intelligence: any = null; - try { - const intelligenceContent = await fs.readFile(PATHS.intelligence, 'utf-8'); - intelligence = JSON.parse(intelligenceContent); - } catch { - /* graceful degradation — intelligence file may not exist yet */ - } - - function computeIndexConfidence(): 'fresh' | 'aging' | 'stale' { - let confidence: 'fresh' | 'aging' | 'stale' = 'stale'; - if (intelligence?.generatedAt) { - const indexAge = Date.now() - new Date(intelligence.generatedAt).getTime(); - const hoursOld = indexAge / (1000 * 60 * 60); - if (hoursOld < 24) { - confidence = 'fresh'; - } else if (hoursOld < 168) { - confidence = 'aging'; - } - } - return confidence; - } - - // Cheap impact breadth estimate from the import graph (used for risk assessment). 
- function computeImpactCandidates(resultPaths: string[]): string[] { - const impactCandidates: string[] = []; - if (!intelligence?.internalFileGraph?.imports) return impactCandidates; - const allImports = intelligence.internalFileGraph.imports as Record; - for (const [file, deps] of Object.entries(allImports)) { - if ( - deps.some((dep: string) => - resultPaths.some((rp) => dep.endsWith(rp) || rp.endsWith(dep)) - ) - ) { - if (!resultPaths.some((rp) => file.endsWith(rp) || rp.endsWith(file))) { - impactCandidates.push(file); - } - } - } - return impactCandidates; - } - - // Build reverse import map from intelligence graph - const reverseImports = new Map(); - if (intelligence?.internalFileGraph?.imports) { - for (const [file, deps] of Object.entries( - intelligence.internalFileGraph.imports - )) { - for (const dep of deps) { - if (!reverseImports.has(dep)) reverseImports.set(dep, []); - reverseImports.get(dep)!.push(file); - } - } - } - - // Enrich a search result with relationship data - function enrichResult(r: SearchResult): RelationshipData | undefined { - const rPath = r.filePath; - - // importedBy: files that import this result (reverse lookup) - const importedBy: string[] = []; - for (const [dep, importers] of reverseImports) { - if (dep.endsWith(rPath) || rPath.endsWith(dep)) { - importedBy.push(...importers); - } - } - - // imports: files this result depends on (forward lookup) - const imports: string[] = []; - if (intelligence?.internalFileGraph?.imports) { - for (const [file, deps] of Object.entries( - intelligence.internalFileGraph.imports - )) { - if (file.endsWith(rPath) || rPath.endsWith(file)) { - imports.push(...deps); - } - } - } - - // testedIn: heuristic — same basename with .spec/.test extension - const testedIn: string[] = []; - const baseName = path.basename(rPath).replace(/\.[^.]+$/, ''); - if (intelligence?.internalFileGraph?.imports) { - for (const file of Object.keys(intelligence.internalFileGraph.imports)) { - const fileBase = 
path.basename(file); - if ( - (fileBase.includes('.spec.') || fileBase.includes('.test.')) && - fileBase.startsWith(baseName) - ) { - testedIn.push(file); - } - } - } - - // Only return if we have at least one piece of data - if (importedBy.length === 0 && imports.length === 0 && testedIn.length === 0) { - return undefined; - } - - return { - ...(importedBy.length > 0 && { importedBy }), - ...(imports.length > 0 && { imports }), - ...(testedIn.length > 0 && { testedIn }) - }; - } - - const searchQuality = assessSearchQuality(query, results); - - // Always-on edit preflight (lite): do not require intent and keep payload small. - let editPreflight: any = undefined; - if (intelligence && (!intent || intent === 'explore')) { - try { - const resultPaths = results.map((r) => r.filePath); - const impactCandidates = computeImpactCandidates(resultPaths); - - let riskLevel: 'low' | 'medium' | 'high' = 'low'; - if (impactCandidates.length > 10) { - riskLevel = 'high'; - } else if (impactCandidates.length > 3) { - riskLevel = 'medium'; - } - - // Use existing pattern intelligence for evidenceLock scoring, but keep the output payload lite. 
- const preferredPatternsForEvidence: Array<{ pattern: string; example?: string }> = []; - const patterns = intelligence.patterns || {}; - for (const [_, data] of Object.entries(patterns)) { - if (data.primary) { - const p = data.primary; - if (p.trend === 'Rising' || p.trend === 'Stable') { - preferredPatternsForEvidence.push({ - pattern: p.name, - ...(p.canonicalExample && { example: p.canonicalExample.file }) - }); - } - } - } - - editPreflight = { - mode: 'lite', - riskLevel, - confidence: computeIndexConfidence(), - evidenceLock: buildEvidenceLock({ - results, - preferredPatterns: preferredPatternsForEvidence.slice(0, 5), - relatedMemories, - failureWarnings: [], - patternConflicts: [], - searchQualityStatus: searchQuality.status - }) - }; - } catch { - // editPreflight is best-effort - never fail search over it - } - } - - // Compose preflight card for edit/refactor/migrate intents - let preflight: any = undefined; - const preflightIntents = ['edit', 'refactor', 'migrate']; - if (intent && preflightIntents.includes(intent) && intelligence) { - try { - // --- Avoid / Prefer patterns --- - const avoidPatterns: any[] = []; - const preferredPatterns: any[] = []; - const patterns = intelligence.patterns || {}; - for (const [category, data] of Object.entries(patterns)) { - // Primary pattern = preferred if Rising or Stable - if (data.primary) { - const p = data.primary; - if (p.trend === 'Rising' || p.trend === 'Stable') { - preferredPatterns.push({ - pattern: p.name, - category, - adoption: p.frequency, - trend: p.trend, - guidance: p.guidance, - ...(p.canonicalExample && { example: p.canonicalExample.file }) - }); - } - } - // Also-detected patterns that are Declining = avoid - if (data.alsoDetected) { - for (const alt of data.alsoDetected) { - if (alt.trend === 'Declining') { - avoidPatterns.push({ - pattern: alt.name, - category, - adoption: alt.frequency, - trend: 'Declining', - guidance: alt.guidance - }); - } - } - } - } - - // --- Impact candidates (files 
importing the result files) --- - const resultPaths = results.map((r) => r.filePath); - const impactCandidates = computeImpactCandidates(resultPaths); - - // --- Risk level (based on circular deps + impact breadth) --- - let riskLevel: 'low' | 'medium' | 'high' = 'low'; - let cycleCount = 0; - if (intelligence.internalFileGraph) { - try { - const graph = InternalFileGraph.fromJSON(intelligence.internalFileGraph, ROOT_PATH); - // Use directory prefixes as scope (not full file paths) - // findCycles(scope) filters files by startsWith, so a full path would only match itself - const scopes = new Set( - resultPaths.map((rp) => { - const lastSlash = rp.lastIndexOf('/'); - return lastSlash > 0 ? rp.substring(0, lastSlash + 1) : rp; - }) - ); - for (const scope of scopes) { - const cycles = graph.findCycles(scope); - cycleCount += cycles.length; - } - } catch { - // Graph reconstruction failed — skip cycle check - } - } - if (cycleCount > 0 || impactCandidates.length > 10) { - riskLevel = 'high'; - } else if (impactCandidates.length > 3) { - riskLevel = 'medium'; - } - - // --- Golden files (exemplar code) --- - const goldenFiles = (intelligence.goldenFiles || []).slice(0, 3).map((g: any) => ({ - file: g.file, - score: g.score - })); - - // --- Confidence (index freshness) --- - const confidence = computeIndexConfidence(); - - // --- Failure memories (1.5x relevance boost) --- - const failureWarnings = relatedMemories - .filter((m) => m.type === 'failure' && !m.stale) - .map((m) => ({ - memory: m.memory, - reason: m.reason, - confidence: m.effectiveConfidence - })) - .slice(0, 3); - - const preferredPatternsForOutput = preferredPatterns.slice(0, 5); - const avoidPatternsForOutput = avoidPatterns.slice(0, 5); - - // --- Pattern conflicts (split decisions within categories) --- - const patternConflicts: Array<{ - category: string; - primary: { name: string; adoption: string }; - alternative: { name: string; adoption: string }; - }> = []; - const hasUnitTestFramework = 
Boolean((patterns as any).unitTestFramework?.primary); - for (const [cat, data] of Object.entries(patterns)) { - if (shouldSkipLegacyTestingFrameworkCategory(cat, patterns as any)) continue; - if (!shouldIncludePatternConflictCategory(cat, query)) continue; - if (!data.primary || !data.alsoDetected?.length) continue; - const primaryFreq = parseFloat(data.primary.frequency) || 100; - if (primaryFreq >= 80) continue; - for (const alt of data.alsoDetected) { - const altFreq = parseFloat(alt.frequency) || 0; - if (altFreq >= 20) { - if (isComplementaryPatternConflict(cat, data.primary.name, alt.name)) continue; - if (hasUnitTestFramework && cat === 'testingFramework') continue; - patternConflicts.push({ - category: cat, - primary: { name: data.primary.name, adoption: data.primary.frequency }, - alternative: { name: alt.name, adoption: alt.frequency } - }); - } - } - } - - const evidenceLock = buildEvidenceLock({ - results, - preferredPatterns: preferredPatternsForOutput, - relatedMemories, - failureWarnings, - patternConflicts, - searchQualityStatus: searchQuality.status - }); - - // Bump risk if there are active failure memories for this area - if (failureWarnings.length > 0 && riskLevel === 'low') { - riskLevel = 'medium'; - } - - // If evidence triangulation is weak, avoid claiming low risk - if (evidenceLock.status === 'block' && riskLevel === 'low') { - riskLevel = 'medium'; - } - - // If epistemic stress says abstain, bump risk - if (evidenceLock.epistemicStress?.abstain && riskLevel === 'low') { - riskLevel = 'medium'; - } - - preflight = { - intent, - riskLevel, - confidence, - evidenceLock, - ...(preferredPatternsForOutput.length > 0 && { - preferredPatterns: preferredPatternsForOutput - }), - ...(avoidPatternsForOutput.length > 0 && { - avoidPatterns: avoidPatternsForOutput - }), - ...(goldenFiles.length > 0 && { goldenFiles }), - ...(impactCandidates.length > 0 && { - impactCandidates: impactCandidates.slice(0, 10) - }), - ...(cycleCount > 0 && { 
circularDependencies: cycleCount }), - ...(failureWarnings.length > 0 && { failureWarnings }) - }; - } catch { - // Preflight construction failed — skip preflight, don't fail the search - } - } - - // For edit/refactor/migrate: return full preflight card (risk, patterns, impact, etc.). - // For explore or lite-only: return flattened { ready, reason }. - let preflightPayload: - | { ready: boolean; reason?: string } - | Record - | undefined; - if (preflight) { - const el = preflight.evidenceLock; - // Full card per tool schema; add top-level ready/reason for backward compatibility - preflightPayload = { - ...preflight, - ready: el?.readyToEdit ?? false, - ...(el && !el.readyToEdit && el.nextAction && { reason: el.nextAction }) - }; - } else if (editPreflight) { - const el = editPreflight.evidenceLock; - preflightPayload = { - ready: el?.readyToEdit ?? false, - ...(el && !el.readyToEdit && el.nextAction && { reason: el.nextAction }) - }; - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - searchQuality: { - status: searchQuality.status, - confidence: searchQuality.confidence, - ...(searchQuality.status === 'low_confidence' && - searchQuality.nextSteps?.[0] && { - hint: searchQuality.nextSteps[0] - }) - }, - ...(preflightPayload && { preflight: preflightPayload }), - results: results.map((r) => { - const relationships = enrichResult(r); - // Condensed relationships: importedBy count + hasTests flag - const condensedRel = relationships - ? 
{ - ...(relationships.importedBy && - relationships.importedBy.length > 0 && { - importedByCount: relationships.importedBy.length - }), - ...(relationships.testedIn && - relationships.testedIn.length > 0 && { hasTests: true }) - } - : undefined; - const hasCondensedRel = condensedRel && Object.keys(condensedRel).length > 0; - - return { - file: `${r.filePath}:${r.startLine}-${r.endLine}`, - summary: r.summary, - score: Math.round(r.score * 100) / 100, - ...(r.componentType && r.layer && { type: `${r.componentType}:${r.layer}` }), - ...(r.trend && r.trend !== 'Stable' && { trend: r.trend }), - ...(r.patternWarning && { patternWarning: r.patternWarning }), - ...(hasCondensedRel && { relationships: condensedRel }), - ...(includeSnippets && r.snippet && { snippet: r.snippet }) - }; - }), - totalResults: results.length, - ...(relatedMemories.length > 0 && { - relatedMemories: relatedMemories - .slice(0, 3) - .map((m) => `${m.memory} (${m.effectiveConfidence})`) - }) - }, - null, - 2 - ) - } - ] - }; - } - - case 'get_indexing_status': { - const progress = indexState.indexer?.getProgress(); - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: indexState.status, - rootPath: ROOT_PATH, - lastIndexed: indexState.lastIndexed?.toISOString(), - stats: indexState.stats - ? { - totalFiles: indexState.stats.totalFiles, - indexedFiles: indexState.stats.indexedFiles, - totalChunks: indexState.stats.totalChunks, - duration: `${(indexState.stats.duration / 1000).toFixed(2)}s`, - incremental: indexState.stats.incremental - } - : undefined, - progress: progress - ? { - phase: progress.phase, - percentage: progress.percentage, - filesProcessed: progress.filesProcessed, - totalFiles: progress.totalFiles - } - : undefined, - error: indexState.error, - hint: 'Use refresh_index to manually trigger re-indexing when needed.' 
- }, - null, - 2 - ) - } - ] - }; - } - - case 'refresh_index': { - const { reason, incrementalOnly } = args as { reason?: string; incrementalOnly?: boolean }; - - const mode = incrementalOnly ? 'incremental' : 'full'; - console.error(`Refresh requested (${mode}): ${reason || 'Manual trigger'}`); - - performIndexing(incrementalOnly); - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'started', - mode, - message: incrementalOnly - ? 'Incremental re-indexing started. Only changed files will be re-embedded.' - : 'Full re-indexing started. Check status with get_indexing_status.', - reason - }, - null, - 2 - ) - } - ] - }; - } - - case 'get_codebase_metadata': { - const indexer = new CodebaseIndexer({ rootPath: ROOT_PATH }); - const metadata = await indexer.detectMetadata(); - - // Load team patterns from intelligence file - let teamPatterns = {}; - try { - const intelligencePath = PATHS.intelligence; - const intelligenceContent = await fs.readFile(intelligencePath, 'utf-8'); - const intelligence = JSON.parse(intelligenceContent); - - if (intelligence.patterns) { - teamPatterns = { - dependencyInjection: intelligence.patterns.dependencyInjection, - stateManagement: intelligence.patterns.stateManagement, - componentInputs: intelligence.patterns.componentInputs - }; - } - } catch (_error) { - // No intelligence file or parsing error - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - metadata: { - name: metadata.name, - framework: metadata.framework, - languages: metadata.languages, - dependencies: metadata.dependencies.slice(0, 20), - architecture: metadata.architecture, - projectStructure: metadata.projectStructure, - statistics: metadata.statistics, - teamPatterns - } - }, - null, - 2 - ) - } - ] - }; - } - - case 'get_style_guide': { - const { query, category } = args as { - query?: string; - category?: string; - }; - const queryStr = typeof query === 'string' ? 
query.trim() : ''; - const queryLower = queryStr.toLowerCase(); - const queryTerms = queryLower.split(/\s+/).filter(Boolean); - const categoryLower = typeof category === 'string' ? category.trim().toLowerCase() : ''; - const limitedMode = queryTerms.length === 0; - const LIMITED_MAX_FILES = 3; - const LIMITED_MAX_SECTIONS_PER_FILE = 2; - - const styleGuidePatterns = [ - 'STYLE_GUIDE.md', - 'CODING_STYLE.md', - 'ARCHITECTURE.md', - 'CONTRIBUTING.md', - 'docs/style-guide.md', - 'docs/coding-style.md', - 'docs/ARCHITECTURE.md' - ]; - - const foundGuides: Array<{ - file: string; - content: string; - relevantSections: string[]; - }> = []; - - for (const pattern of styleGuidePatterns) { - try { - const files = await glob(pattern, { - cwd: ROOT_PATH, - absolute: true - }); - for (const file of files) { - try { - // Normalize line endings to \n for consistent output - const rawContent = await fs.readFile(file, 'utf-8'); - const content = rawContent.replace(/\r\n/g, '\n'); - const relativePath = path.relative(ROOT_PATH, file); - - // Find relevant sections based on query - const sections = content.split(/^##\s+/m); - const relevantSections: string[] = []; - if (limitedMode) { - const headings = (content.match(/^##\s+.+$/gm) || []) - .map((h) => h.trim()) - .filter(Boolean) - .slice(0, LIMITED_MAX_SECTIONS_PER_FILE); - - if (headings.length > 0) { - relevantSections.push(...headings); - } else { - const words = content.split(/\s+/).filter(Boolean); - if (words.length > 0) { - relevantSections.push(`Overview: ${words.slice(0, 80).join(' ')}...`); - } - } - } else { - for (const section of sections) { - const sectionLower = section.toLowerCase(); - const isRelevant = queryTerms.some((term) => sectionLower.includes(term)); - if (isRelevant) { - // Limit section size to ~500 words - const words = section.split(/\s+/); - const truncated = words.slice(0, 500).join(' '); - relevantSections.push( - '## ' + (words.length > 500 ? truncated + '...' 
: section.trim()) - ); - } - } - } - - const categoryMatch = - !categoryLower || - relativePath.toLowerCase().includes(categoryLower) || - relevantSections.some((section) => section.toLowerCase().includes(categoryLower)); - if (!categoryMatch) { - continue; - } - - if (relevantSections.length > 0) { - foundGuides.push({ - file: relativePath, - content: content.slice(0, 200) + '...', - relevantSections: relevantSections.slice( - 0, - limitedMode ? LIMITED_MAX_SECTIONS_PER_FILE : 3 - ) - }); - } - } catch (_e) { - // Skip unreadable files - } - } - } catch (_e) { - // Pattern didn't match, continue - } - } - - const results = limitedMode ? foundGuides.slice(0, LIMITED_MAX_FILES) : foundGuides; - - if (results.length === 0) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'no_results', - message: limitedMode - ? 'No style guide files found in the default locations.' - : `No style guide content found matching: ${queryStr}`, - searchedPatterns: styleGuidePatterns, - hint: limitedMode - ? "Run get_style_guide with a query or category (e.g. category: 'testing') for targeted results." - : "Try broader terms like 'naming', 'patterns', 'testing', 'components'" - }, - null, - 2 - ) - } - ] - }; - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - query: queryStr || undefined, - category, - limited: limitedMode, - notice: limitedMode - ? 'No query provided. Results are capped. Provide query and/or category for targeted guidance.' - : undefined, - resultLimits: limitedMode - ? 
{ - maxFiles: LIMITED_MAX_FILES, - maxSectionsPerFile: LIMITED_MAX_SECTIONS_PER_FILE - } - : undefined, - results, - totalFiles: results.length, - totalMatches: foundGuides.length - }, - null, - 2 - ) - } - ] - }; - } - case 'get_team_patterns': { - const { category } = args as { category?: string }; - - try { - const intelligencePath = PATHS.intelligence; - const content = await fs.readFile(intelligencePath, 'utf-8'); - const intelligence = JSON.parse(content); - - const result: any = { status: 'success' }; - - if (category === 'all' || !category) { - result.patterns = intelligence.patterns || {}; - result.goldenFiles = intelligence.goldenFiles || []; - if (intelligence.tsconfigPaths) { - result.tsconfigPaths = intelligence.tsconfigPaths; - } - } else if (category === 'di') { - result.dependencyInjection = intelligence.patterns?.dependencyInjection; - } else if (category === 'state') { - result.stateManagement = intelligence.patterns?.stateManagement; - } else if (category === 'testing') { - result.unitTestFramework = intelligence.patterns?.unitTestFramework; - result.e2eFramework = intelligence.patterns?.e2eFramework; - result.testingFramework = intelligence.patterns?.testingFramework; - result.testMocking = intelligence.patterns?.testMocking; - } else if (category === 'libraries') { - result.topUsed = intelligence.importGraph?.topUsed || []; - if (intelligence.tsconfigPaths) { - result.tsconfigPaths = intelligence.tsconfigPaths; - } - } - - // Load and append matching memories - try { - const allMemories = await readMemoriesFile(PATHS.memory); - - // Map pattern categories to decision categories - const categoryMap: Record = { - all: ['tooling', 'architecture', 'testing', 'dependencies', 'conventions'], - di: ['architecture', 'conventions'], - state: ['architecture', 'conventions'], - testing: ['testing'], - libraries: ['dependencies'] - }; - - const relevantCategories = categoryMap[category || 'all'] || []; - const matchingMemories = allMemories.filter((m) => - 
relevantCategories.includes(m.category) - ); - - if (matchingMemories.length > 0) { - result.memories = matchingMemories; - } - } catch (_error) { - // No memory file yet, that's fine - don't fail the whole request - } - - // Detect pattern conflicts: primary < 80% and any alternative > 20% - const conflicts: any[] = []; - const patternsData = intelligence.patterns || {}; - const hasUnitTestFramework = Boolean(patternsData.unitTestFramework?.primary); - for (const [cat, data] of Object.entries(patternsData)) { - if (shouldSkipLegacyTestingFrameworkCategory(cat, patternsData)) continue; - if (category && category !== 'all' && cat !== category) continue; - if (!data.primary || !data.alsoDetected?.length) continue; - - const primaryFreq = parseFloat(data.primary.frequency) || 100; - if (primaryFreq >= 80) continue; - - for (const alt of data.alsoDetected) { - const altFreq = parseFloat(alt.frequency) || 0; - if (altFreq < 20) continue; - if (isComplementaryPatternConflict(cat, data.primary.name, alt.name)) continue; - if (hasUnitTestFramework && cat === 'testingFramework') continue; - - conflicts.push({ - category: cat, - primary: { - name: data.primary.name, - adoption: data.primary.frequency, - trend: data.primary.trend - }, - alternative: { - name: alt.name, - adoption: alt.frequency, - trend: alt.trend - }, - note: `Split decision: ${data.primary.frequency} ${data.primary.name} (${data.primary.trend || 'unknown'}) vs ${alt.frequency} ${alt.name} (${alt.trend || 'unknown'})` - }); - } - } - if (conflicts.length > 0) { - result.conflicts = conflicts; - } - - return { - content: [{ type: 'text', text: JSON.stringify(result, null, 2) }] - }; - } catch (error) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: 'Failed to load team patterns', - error: error instanceof Error ? 
error.message : String(error) - }, - null, - 2 - ) - } - ] - }; - } - } - - case 'get_symbol_references': { - const { symbol, limit } = args as { symbol?: unknown; limit?: unknown }; - const normalizedSymbol = typeof symbol === 'string' ? symbol.trim() : ''; - const normalizedLimit = - typeof limit === 'number' && Number.isFinite(limit) && limit > 0 ? Math.floor(limit) : 10; - - if (!normalizedSymbol) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: "Invalid params: 'symbol' is required and must be a non-empty string." - }, - null, - 2 - ) - } - ], - isError: true - }; - } - - const result = await findSymbolReferences(ROOT_PATH, normalizedSymbol, normalizedLimit); - - if (result.status === 'error') { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - symbol: normalizedSymbol, - message: result.message - }, - null, - 2 - ) - } - ] - }; - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - symbol: result.symbol, - usageCount: result.usageCount, - usages: result.usages - }, - null, - 2 - ) - } - ] - }; - } - - case 'get_component_usage': { - const { name: componentName } = args as { name: string }; - - try { - const intelligencePath = PATHS.intelligence; - const content = await fs.readFile(intelligencePath, 'utf-8'); - const intelligence = JSON.parse(content); - - const importGraph = intelligence.importGraph || {}; - const usages = importGraph.usages || {}; - - // Find matching usages (exact match or partial match) - let matchedUsage = usages[componentName]; - - // Try partial match if exact match not found - if (!matchedUsage) { - const matchingKeys = Object.keys(usages).filter( - (key) => key.includes(componentName) || componentName.includes(key) - ); - if (matchingKeys.length > 0) { - matchedUsage = usages[matchingKeys[0]]; - } - } - - if (matchedUsage) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - 
{ - status: 'success', - component: componentName, - usageCount: matchedUsage.usageCount, - usedIn: matchedUsage.usedIn - }, - null, - 2 - ) - } - ] - }; - } else { - // Show top used as alternatives - const topUsed = importGraph.topUsed || []; - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'not_found', - component: componentName, - message: `No usages found for '${componentName}'.`, - suggestions: topUsed.slice(0, 10) - }, - null, - 2 - ) - } - ] - }; - } - } catch (error) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: 'Failed to get component usage. Run indexing first.', - error: error instanceof Error ? error.message : String(error) - }, - null, - 2 - ) - } - ] - }; - } - } - - case 'detect_circular_dependencies': { - const { scope } = args as { scope?: string }; - - try { - const intelligencePath = PATHS.intelligence; - const content = await fs.readFile(intelligencePath, 'utf-8'); - const intelligence = JSON.parse(content); - - if (!intelligence.internalFileGraph) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: - 'Internal file graph not found. Please run refresh_index to rebuild the index with cycle detection support.' - }, - null, - 2 - ) - } - ] - }; - } - - // Reconstruct the graph from stored data - const graph = InternalFileGraph.fromJSON(intelligence.internalFileGraph, ROOT_PATH); - const cycles = graph.findCycles(scope); - const graphStats = intelligence.internalFileGraph.stats || graph.getStats(); - - if (cycles.length === 0) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - message: scope - ? 
`No circular dependencies detected in scope: ${scope}` - : 'No circular dependencies detected in the codebase.', - scope, - graphStats - }, - null, - 2 - ) - } - ] - }; - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'warning', - message: `Found ${cycles.length} circular dependency cycle(s).`, - scope, - cycles: cycles.map((c) => ({ - files: c.files, - length: c.length, - severity: c.length === 2 ? 'high' : c.length <= 3 ? 'medium' : 'low' - })), - count: cycles.length, - graphStats, - advice: - 'Shorter cycles (length 2-3) are typically more problematic. Consider breaking the cycle by extracting shared dependencies.' - }, - null, - 2 - ) - } - ] - }; - } catch (error) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: 'Failed to detect circular dependencies. Run indexing first.', - error: error instanceof Error ? error.message : String(error) - }, - null, - 2 - ) - } - ] - }; - } - } - - case 'remember': { - const args_typed = args as { - type?: MemoryType; - category: MemoryCategory; - memory: string; - reason: string; - }; - - const { type = 'decision', category, memory, reason } = args_typed; - - try { - const crypto = await import('crypto'); - const memoryPath = PATHS.memory; - - const hashContent = `${type}:${category}:${memory}:${reason}`; - const hash = crypto.createHash('sha256').update(hashContent).digest('hex'); - const id = hash.substring(0, 12); - - const newMemory: Memory = { - id, - type, - category, - memory, - reason, - date: new Date().toISOString() - }; - - const result = await appendMemoryFile(memoryPath, newMemory); - - if (result.status === 'duplicate') { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'info', - message: 'This memory was already recorded.', - memory: result.memory - }, - null, - 2 - ) - } - ] - }; - } - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - 
message: 'Memory recorded successfully.', - memory: result.memory - }, - null, - 2 - ) - } - ] - }; - } catch (error) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: 'Failed to record memory.', - error: error instanceof Error ? error.message : String(error) - }, - null, - 2 - ) - } - ] - }; - } - } - - case 'get_memory': { - const { category, type, query } = args as { - category?: MemoryCategory; - type?: MemoryType; - query?: string; - }; - - try { - const memoryPath = PATHS.memory; - const allMemories = await readMemoriesFile(memoryPath); - - if (allMemories.length === 0) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - message: - "No team conventions recorded yet. Use 'remember' to build tribal knowledge or memory when the user corrects you over a repeatable pattern.", - memories: [], - count: 0 - }, - null, - 2 - ) - } - ] - }; - } - - const filtered = filterMemories(allMemories, { category, type, query }); - const limited = applyUnfilteredLimit(filtered, { category, type, query }, 20); - - // Enrich with confidence decay - const enriched = withConfidence(limited.memories); - const staleCount = enriched.filter((m) => m.stale).length; - - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'success', - count: enriched.length, - totalCount: limited.totalCount, - truncated: limited.truncated, - ...(staleCount > 0 && { - staleCount, - staleNote: `${staleCount} memor${staleCount === 1 ? 'y' : 'ies'} below 30% confidence. Consider reviewing or removing.` - }), - message: limited.truncated - ? 'Showing 20 most recent. Use filters (category/type/query) for targeted results.' - : undefined, - memories: enriched - }, - null, - 2 - ) - } - ] - }; - } catch (error) { - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - status: 'error', - message: 'Failed to retrieve memories.', - error: error instanceof Error ? 
error.message : String(error) - }, - null, - 2 - ) - } - ] - }; - } - } - - default: - return { - content: [ - { - type: 'text', - text: JSON.stringify( - { - error: `Unknown tool: ${name}` - }, - null, - 2 - ) - } - ], - isError: true - }; - } + const ctx: ToolContext = { + indexState, + paths: PATHS, + rootPath: ROOT_PATH, + performIndexing + }; + return await dispatchTool(name, args as Record, ctx); } catch (error) { return { content: [ diff --git a/src/tools/detect-circular-dependencies.ts b/src/tools/detect-circular-dependencies.ts new file mode 100644 index 0000000..bcf867c --- /dev/null +++ b/src/tools/detect-circular-dependencies.ts @@ -0,0 +1,124 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { promises as fs } from 'fs'; +import type { ToolContext, ToolResponse } from './types.js'; +import { InternalFileGraph } from '../utils/usage-tracker.js'; + +export const definition: Tool = { + name: 'detect_circular_dependencies', + description: + 'Analyze the import graph to detect circular dependencies between files. ' + + 'Circular dependencies can cause initialization issues, tight coupling, and maintenance problems. ' + + 'Returns all detected cycles sorted by length (shorter cycles are often more problematic).', + inputSchema: { + type: 'object', + properties: { + scope: { + type: 'string', + description: + "Optional path prefix to limit analysis (e.g., 'src/features', 'libs/shared')" + } + } + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { scope } = args as { scope?: string }; + + try { + const intelligencePath = ctx.paths.intelligence; + const content = await fs.readFile(intelligencePath, 'utf-8'); + const intelligence = JSON.parse(content); + + if (!intelligence.internalFileGraph) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: + 'Internal file graph not found. 
Please run refresh_index to rebuild the index with cycle detection support.' + }, + null, + 2 + ) + } + ] + }; + } + + // Reconstruct the graph from stored data + const graph = InternalFileGraph.fromJSON(intelligence.internalFileGraph, ctx.rootPath); + const cycles = graph.findCycles(scope); + const graphStats = intelligence.internalFileGraph.stats || graph.getStats(); + + if (cycles.length === 0) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + message: scope + ? `No circular dependencies detected in scope: ${scope}` + : 'No circular dependencies detected in the codebase.', + scope, + graphStats + }, + null, + 2 + ) + } + ] + }; + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'warning', + message: `Found ${cycles.length} circular dependency cycle(s).`, + scope, + cycles: cycles.map((c) => ({ + files: c.files, + length: c.length, + severity: c.length === 2 ? 'high' : c.length <= 3 ? 'medium' : 'low' + })), + count: cycles.length, + graphStats, + advice: + 'Shorter cycles (length 2-3) are typically more problematic. Consider breaking the cycle by extracting shared dependencies.' + }, + null, + 2 + ) + } + ] + }; + } catch (error) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: 'Failed to detect circular dependencies. Run indexing first.', + error: error instanceof Error ? 
error.message : String(error) + }, + null, + 2 + ) + } + ] + }; + } +} diff --git a/src/tools/get-codebase-metadata.ts b/src/tools/get-codebase-metadata.ts new file mode 100644 index 0000000..93f8a1e --- /dev/null +++ b/src/tools/get-codebase-metadata.ts @@ -0,0 +1,66 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { promises as fs } from 'fs'; +import type { ToolContext, ToolResponse } from './types.js'; +import { CodebaseIndexer } from '../core/indexer.js'; + +export const definition: Tool = { + name: 'get_codebase_metadata', + description: + 'Get codebase metadata including framework information, dependencies, architecture patterns, ' + + 'and project statistics.', + inputSchema: { + type: 'object', + properties: {} + } +}; + +export async function handle( + _args: Record, + ctx: ToolContext +): Promise { + const indexer = new CodebaseIndexer({ rootPath: ctx.rootPath }); + const metadata = await indexer.detectMetadata(); + + // Load team patterns from intelligence file + let teamPatterns = {}; + try { + const intelligencePath = ctx.paths.intelligence; + const intelligenceContent = await fs.readFile(intelligencePath, 'utf-8'); + const intelligence = JSON.parse(intelligenceContent); + + if (intelligence.patterns) { + teamPatterns = { + dependencyInjection: intelligence.patterns.dependencyInjection, + stateManagement: intelligence.patterns.stateManagement, + componentInputs: intelligence.patterns.componentInputs + }; + } + } catch (_error) { + // No intelligence file or parsing error + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + metadata: { + name: metadata.name, + framework: metadata.framework, + languages: metadata.languages, + dependencies: metadata.dependencies.slice(0, 20), + architecture: metadata.architecture, + projectStructure: metadata.projectStructure, + statistics: metadata.statistics, + teamPatterns + } + }, + null, + 2 + ) + } + ] + }; +} diff --git 
a/src/tools/get-component-usage.ts b/src/tools/get-component-usage.ts new file mode 100644 index 0000000..44f9e81 --- /dev/null +++ b/src/tools/get-component-usage.ts @@ -0,0 +1,108 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { promises as fs } from 'fs'; +import type { ToolContext, ToolResponse } from './types.js'; + +export const definition: Tool = { + name: 'get_component_usage', + description: + 'Find WHERE a library or component is used in the codebase. ' + + "This is 'Find Usages' - returns all files that import a given package/module. " + + "Example: get_component_usage('@mycompany/utils') -> shows all files using it.", + inputSchema: { + type: 'object', + properties: { + name: { + type: 'string', + description: + "Import source to find usages for (e.g., 'primeng/table', '@mycompany/ui/button', 'lodash')" + } + }, + required: ['name'] + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { name: componentName } = args as { name: string }; + + try { + const intelligencePath = ctx.paths.intelligence; + const content = await fs.readFile(intelligencePath, 'utf-8'); + const intelligence = JSON.parse(content); + + const importGraph = intelligence.importGraph || {}; + const usages = importGraph.usages || {}; + + // Find matching usages (exact match or partial match) + let matchedUsage = usages[componentName]; + + // Try partial match if exact match not found + if (!matchedUsage) { + const matchingKeys = Object.keys(usages).filter( + (key) => key.includes(componentName) || componentName.includes(key) + ); + if (matchingKeys.length > 0) { + matchedUsage = usages[matchingKeys[0]]; + } + } + + if (matchedUsage) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + component: componentName, + usageCount: matchedUsage.usageCount, + usedIn: matchedUsage.usedIn + }, + null, + 2 + ) + } + ] + }; + } else { + // Show top used as alternatives + const topUsed 
= importGraph.topUsed || []; + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'not_found', + component: componentName, + message: `No usages found for '${componentName}'.`, + suggestions: topUsed.slice(0, 10) + }, + null, + 2 + ) + } + ] + }; + } + } catch (error) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: 'Failed to get component usage. Run indexing first.', + error: error instanceof Error ? error.message : String(error) + }, + null, + 2 + ) + } + ] + }; + } +} diff --git a/src/tools/get-indexing-status.ts b/src/tools/get-indexing-status.ts new file mode 100644 index 0000000..9da86ca --- /dev/null +++ b/src/tools/get-indexing-status.ts @@ -0,0 +1,56 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import type { ToolContext, ToolResponse } from './types.js'; + +export const definition: Tool = { + name: 'get_indexing_status', + description: + 'Get current indexing status: state, statistics, and progress. ' + + 'Use refresh_index to manually trigger re-indexing when needed.', + inputSchema: { + type: 'object', + properties: {} + } +}; + +export async function handle( + _args: Record, + ctx: ToolContext +): Promise { + const progress = ctx.indexState.indexer?.getProgress(); + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: ctx.indexState.status, + rootPath: ctx.rootPath, + lastIndexed: ctx.indexState.lastIndexed?.toISOString(), + stats: ctx.indexState.stats + ? { + totalFiles: ctx.indexState.stats.totalFiles, + indexedFiles: ctx.indexState.stats.indexedFiles, + totalChunks: ctx.indexState.stats.totalChunks, + duration: `${(ctx.indexState.stats.duration / 1000).toFixed(2)}s`, + incremental: ctx.indexState.stats.incremental + } + : undefined, + progress: progress + ? 
{ + phase: progress.phase, + percentage: progress.percentage, + filesProcessed: progress.filesProcessed, + totalFiles: progress.totalFiles + } + : undefined, + error: ctx.indexState.error, + hint: 'Use refresh_index to manually trigger re-indexing when needed.' + }, + null, + 2 + ) + } + ] + }; +} diff --git a/src/tools/get-memory.ts b/src/tools/get-memory.ts new file mode 100644 index 0000000..36e0e3a --- /dev/null +++ b/src/tools/get-memory.ts @@ -0,0 +1,123 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import type { ToolContext, ToolResponse } from './types.js'; +import type { MemoryCategory, MemoryType } from '../types/index.js'; +import { + readMemoriesFile, + filterMemories, + applyUnfilteredLimit, + withConfidence +} from '../memory/store.js'; + +export const definition: Tool = { + name: 'get_memory', + description: + 'Retrieves team conventions, architectural decisions, and known gotchas.\n' + + 'CALL BEFORE suggesting patterns, libraries, or architecture.\n\n' + + 'Filters: category (tooling/architecture/testing/dependencies/conventions), type (convention/decision/gotcha), query (keyword search).', + inputSchema: { + type: 'object', + properties: { + category: { + type: 'string', + description: 'Filter by category', + enum: ['tooling', 'architecture', 'testing', 'dependencies', 'conventions'] + }, + type: { + type: 'string', + description: 'Filter by memory type', + enum: ['convention', 'decision', 'gotcha', 'failure'] + }, + query: { + type: 'string', + description: 'Keyword search across memory and reason' + } + } + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { category, type, query } = args as { + category?: MemoryCategory; + type?: MemoryType; + query?: string; + }; + + try { + const memoryPath = ctx.paths.memory; + const allMemories = await readMemoriesFile(memoryPath); + + if (allMemories.length === 0) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + 
status: 'success', + message: + "No team conventions recorded yet. Use 'remember' to build tribal knowledge or memory when the user corrects you over a repeatable pattern.", + memories: [], + count: 0 + }, + null, + 2 + ) + } + ] + }; + } + + const filtered = filterMemories(allMemories, { category, type, query }); + const limited = applyUnfilteredLimit(filtered, { category, type, query }, 20); + + // Enrich with confidence decay + const enriched = withConfidence(limited.memories); + const staleCount = enriched.filter((m) => m.stale).length; + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + count: enriched.length, + totalCount: limited.totalCount, + truncated: limited.truncated, + ...(staleCount > 0 && { + staleCount, + staleNote: `${staleCount} memor${staleCount === 1 ? 'y' : 'ies'} below 30% confidence. Consider reviewing or removing.` + }), + message: limited.truncated + ? 'Showing 20 most recent. Use filters (category/type/query) for targeted results.' + : undefined, + memories: enriched + }, + null, + 2 + ) + } + ] + }; + } catch (error) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: 'Failed to retrieve memories.', + error: error instanceof Error ? 
error.message : String(error) + }, + null, + 2 + ) + } + ] + }; + } +} diff --git a/src/tools/get-style-guide.ts b/src/tools/get-style-guide.ts new file mode 100644 index 0000000..f32f2c0 --- /dev/null +++ b/src/tools/get-style-guide.ts @@ -0,0 +1,185 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { promises as fs } from 'fs'; +import { glob } from 'glob'; +import path from 'path'; +import type { ToolContext, ToolResponse } from './types.js'; + +export const definition: Tool = { + name: 'get_style_guide', + description: 'Query style guide rules and architectural patterns from project documentation.', + inputSchema: { + type: 'object', + properties: { + query: { + type: 'string', + description: + 'Query for specific style guide rules (e.g., "component naming", "service patterns")' + }, + category: { + type: 'string', + description: 'Filter by category (naming, structure, patterns, testing)' + } + } + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { query, category } = args as { + query?: string; + category?: string; + }; + const queryStr = typeof query === 'string' ? query.trim() : ''; + const queryLower = queryStr.toLowerCase(); + const queryTerms = queryLower.split(/\s+/).filter(Boolean); + const categoryLower = typeof category === 'string' ? 
category.trim().toLowerCase() : ''; + const limitedMode = queryTerms.length === 0; + const LIMITED_MAX_FILES = 3; + const LIMITED_MAX_SECTIONS_PER_FILE = 2; + + const styleGuidePatterns = [ + 'STYLE_GUIDE.md', + 'CODING_STYLE.md', + 'ARCHITECTURE.md', + 'CONTRIBUTING.md', + 'docs/style-guide.md', + 'docs/coding-style.md', + 'docs/ARCHITECTURE.md' + ]; + + const foundGuides: Array<{ + file: string; + content: string; + relevantSections: string[]; + }> = []; + + for (const pattern of styleGuidePatterns) { + try { + const files = await glob(pattern, { + cwd: ctx.rootPath, + absolute: true + }); + for (const file of files) { + try { + // Normalize line endings to \n for consistent output + const rawContent = await fs.readFile(file, 'utf-8'); + const content = rawContent.replace(/\r\n/g, '\n'); + const relativePath = path.relative(ctx.rootPath, file); + + // Find relevant sections based on query + const sections = content.split(/^##\s+/m); + const relevantSections: string[] = []; + if (limitedMode) { + const headings = (content.match(/^##\s+.+$/gm) || []) + .map((h) => h.trim()) + .filter(Boolean) + .slice(0, LIMITED_MAX_SECTIONS_PER_FILE); + + if (headings.length > 0) { + relevantSections.push(...headings); + } else { + const words = content.split(/\s+/).filter(Boolean); + if (words.length > 0) { + relevantSections.push(`Overview: ${words.slice(0, 80).join(' ')}...`); + } + } + } else { + for (const section of sections) { + const sectionLower = section.toLowerCase(); + const isRelevant = queryTerms.some((term) => sectionLower.includes(term)); + if (isRelevant) { + // Limit section size to ~500 words + const words = section.split(/\s+/); + const truncated = words.slice(0, 500).join(' '); + relevantSections.push( + '## ' + (words.length > 500 ? truncated + '...' 
: section.trim()) + ); + } + } + } + + const categoryMatch = + !categoryLower || + relativePath.toLowerCase().includes(categoryLower) || + relevantSections.some((section) => section.toLowerCase().includes(categoryLower)); + if (!categoryMatch) { + continue; + } + + if (relevantSections.length > 0) { + foundGuides.push({ + file: relativePath, + content: content.slice(0, 200) + '...', + relevantSections: relevantSections.slice( + 0, + limitedMode ? LIMITED_MAX_SECTIONS_PER_FILE : 3 + ) + }); + } + } catch (_e) { + // Skip unreadable files + } + } + } catch (_e) { + // Pattern didn't match, continue + } + } + + const results = limitedMode ? foundGuides.slice(0, LIMITED_MAX_FILES) : foundGuides; + + if (results.length === 0) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'no_results', + message: limitedMode + ? 'No style guide files found in the default locations.' + : `No style guide content found matching: ${queryStr}`, + searchedPatterns: styleGuidePatterns, + hint: limitedMode + ? "Run get_style_guide with a query or category (e.g. category: 'testing') for targeted results." + : "Try broader terms like 'naming', 'patterns', 'testing', 'components'" + }, + null, + 2 + ) + } + ] + }; + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + query: queryStr || undefined, + category, + limited: limitedMode, + notice: limitedMode + ? 'No query provided. Results are capped. Provide query and/or category for targeted guidance.' + : undefined, + resultLimits: limitedMode + ? 
{ + maxFiles: LIMITED_MAX_FILES, + maxSectionsPerFile: LIMITED_MAX_SECTIONS_PER_FILE + } + : undefined, + results, + totalFiles: results.length, + totalMatches: foundGuides.length + }, + null, + 2 + ) + } + ] + }; +} diff --git a/src/tools/get-symbol-references.ts b/src/tools/get-symbol-references.ts new file mode 100644 index 0000000..36a5643 --- /dev/null +++ b/src/tools/get-symbol-references.ts @@ -0,0 +1,93 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import type { ToolContext, ToolResponse } from './types.js'; +import { findSymbolReferences } from '../core/symbol-references.js'; + +export const definition: Tool = { + name: 'get_symbol_references', + description: + 'Find concrete references to a symbol in indexed chunks. Returns total usageCount and top usage snippets.', + inputSchema: { + type: 'object', + properties: { + symbol: { + type: 'string', + description: + 'Symbol name to find references for (for example: parseConfig or UserService)' + }, + limit: { + type: 'number', + description: 'Maximum number of usage snippets to return (default: 10)', + default: 10 + } + }, + required: ['symbol'] + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { symbol, limit } = args as { symbol?: unknown; limit?: unknown }; + const normalizedSymbol = typeof symbol === 'string' ? symbol.trim() : ''; + const normalizedLimit = + typeof limit === 'number' && Number.isFinite(limit) && limit > 0 ? Math.floor(limit) : 10; + + if (!normalizedSymbol) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: "Invalid params: 'symbol' is required and must be a non-empty string." 
+ }, + null, + 2 + ) + } + ], + isError: true + }; + } + + const result = await findSymbolReferences(ctx.rootPath, normalizedSymbol, normalizedLimit); + + if (result.status === 'error') { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + symbol: normalizedSymbol, + message: result.message + }, + null, + 2 + ) + } + ] + }; + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + symbol: result.symbol, + usageCount: result.usageCount, + usages: result.usages + }, + null, + 2 + ) + } + ] + }; +} diff --git a/src/tools/get-team-patterns.ts b/src/tools/get-team-patterns.ts new file mode 100644 index 0000000..46b7d5a --- /dev/null +++ b/src/tools/get-team-patterns.ts @@ -0,0 +1,146 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { promises as fs } from 'fs'; +import type { ToolContext, ToolResponse } from './types.js'; +import { readMemoriesFile } from '../memory/store.js'; +import { + isComplementaryPatternConflict, + shouldSkipLegacyTestingFrameworkCategory +} from '../patterns/semantics.js'; + +export const definition: Tool = { + name: 'get_team_patterns', + description: + 'Get actionable team pattern recommendations based on codebase analysis. 
' + + 'Returns consensus patterns for DI, state management, testing, library wrappers, etc.', + inputSchema: { + type: 'object', + properties: { + category: { + type: 'string', + description: 'Pattern category to retrieve', + enum: ['all', 'di', 'state', 'testing', 'libraries'] + } + } + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { category } = args as { category?: string }; + + try { + const intelligencePath = ctx.paths.intelligence; + const content = await fs.readFile(intelligencePath, 'utf-8'); + const intelligence = JSON.parse(content); + + const result: any = { status: 'success' }; + + if (category === 'all' || !category) { + result.patterns = intelligence.patterns || {}; + result.goldenFiles = intelligence.goldenFiles || []; + if (intelligence.tsconfigPaths) { + result.tsconfigPaths = intelligence.tsconfigPaths; + } + } else if (category === 'di') { + result.dependencyInjection = intelligence.patterns?.dependencyInjection; + } else if (category === 'state') { + result.stateManagement = intelligence.patterns?.stateManagement; + } else if (category === 'testing') { + result.unitTestFramework = intelligence.patterns?.unitTestFramework; + result.e2eFramework = intelligence.patterns?.e2eFramework; + result.testingFramework = intelligence.patterns?.testingFramework; + result.testMocking = intelligence.patterns?.testMocking; + } else if (category === 'libraries') { + result.topUsed = intelligence.importGraph?.topUsed || []; + if (intelligence.tsconfigPaths) { + result.tsconfigPaths = intelligence.tsconfigPaths; + } + } + + // Load and append matching memories + try { + const allMemories = await readMemoriesFile(ctx.paths.memory); + + // Map pattern categories to decision categories + const categoryMap: Record = { + all: ['tooling', 'architecture', 'testing', 'dependencies', 'conventions'], + di: ['architecture', 'conventions'], + state: ['architecture', 'conventions'], + testing: ['testing'], + libraries: 
['dependencies'] + }; + + const relevantCategories = categoryMap[category || 'all'] || []; + const matchingMemories = allMemories.filter((m) => + relevantCategories.includes(m.category) + ); + + if (matchingMemories.length > 0) { + result.memories = matchingMemories; + } + } catch (_error) { + // No memory file yet, that's fine - don't fail the whole request + } + + // Detect pattern conflicts: primary < 80% and any alternative > 20% + const conflicts: any[] = []; + const patternsData = intelligence.patterns || {}; + const hasUnitTestFramework = Boolean(patternsData.unitTestFramework?.primary); + for (const [cat, data] of Object.entries(patternsData)) { + if (shouldSkipLegacyTestingFrameworkCategory(cat, patternsData)) continue; + if (category && category !== 'all' && cat !== category) continue; + if (!data.primary || !data.alsoDetected?.length) continue; + + const primaryFreq = parseFloat(data.primary.frequency) || 100; + if (primaryFreq >= 80) continue; + + for (const alt of data.alsoDetected) { + const altFreq = parseFloat(alt.frequency) || 0; + if (altFreq < 20) continue; + if (isComplementaryPatternConflict(cat, data.primary.name, alt.name)) continue; + if (hasUnitTestFramework && cat === 'testingFramework') continue; + + conflicts.push({ + category: cat, + primary: { + name: data.primary.name, + adoption: data.primary.frequency, + trend: data.primary.trend + }, + alternative: { + name: alt.name, + adoption: alt.frequency, + trend: alt.trend + }, + note: `Split decision: ${data.primary.frequency} ${data.primary.name} (${data.primary.trend || 'unknown'}) vs ${alt.frequency} ${alt.name} (${alt.trend || 'unknown'})` + }); + } + } + if (conflicts.length > 0) { + result.conflicts = conflicts; + } + + return { + content: [{ type: 'text', text: JSON.stringify(result, null, 2) }] + }; + } catch (error) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: 'Failed to load team patterns', + error: error instanceof Error 
? error.message : String(error) + }, + null, + 2 + ) + } + ] + }; + } +} diff --git a/src/tools/index.ts b/src/tools/index.ts new file mode 100644 index 0000000..0936946 --- /dev/null +++ b/src/tools/index.ts @@ -0,0 +1,55 @@ +export type { ToolContext, ToolResponse, ToolPaths } from './types.js'; + +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; + +import { definition as d1, handle as h1 } from './search-codebase.js'; +import { definition as d2, handle as h2 } from './get-codebase-metadata.js'; +import { definition as d3, handle as h3 } from './get-indexing-status.js'; +import { definition as d4, handle as h4 } from './refresh-index.js'; +import { definition as d5, handle as h5 } from './get-style-guide.js'; +import { definition as d6, handle as h6 } from './get-team-patterns.js'; +import { definition as d7, handle as h7 } from './get-symbol-references.js'; +import { definition as d8, handle as h8 } from './get-component-usage.js'; +import { definition as d9, handle as h9 } from './detect-circular-dependencies.js'; +import { definition as d10, handle as h10 } from './remember.js'; +import { definition as d11, handle as h11 } from './get-memory.js'; + +import type { ToolContext, ToolResponse } from './types.js'; + +export const TOOLS: Tool[] = [d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11]; + +export async function dispatchTool( + name: string, + args: Record, + ctx: ToolContext +): Promise { + switch (name) { + case 'search_codebase': + return h1(args, ctx); + case 'get_codebase_metadata': + return h2(args, ctx); + case 'get_indexing_status': + return h3(args, ctx); + case 'refresh_index': + return h4(args, ctx); + case 'get_style_guide': + return h5(args, ctx); + case 'get_team_patterns': + return h6(args, ctx); + case 'get_symbol_references': + return h7(args, ctx); + case 'get_component_usage': + return h8(args, ctx); + case 'detect_circular_dependencies': + return h9(args, ctx); + case 'remember': + return h10(args, ctx); + case 'get_memory': 
+      return h11(args, ctx);
+    default:
+      return {
+        content: [{ type: 'text', text: JSON.stringify({ error: `Unknown tool: ${name}` }) }],
+        isError: true
+      };
+  }
+}
diff --git a/src/tools/refresh-index.ts b/src/tools/refresh-index.ts
new file mode 100644
index 0000000..3c1b647
--- /dev/null
+++ b/src/tools/refresh-index.ts
@@ -0,0 +1,55 @@
+import type { Tool } from '@modelcontextprotocol/sdk/types.js';
+import type { ToolContext, ToolResponse } from './types.js';
+
+export const definition: Tool = {
+  name: 'refresh_index',
+  description:
+    'Re-index the codebase. Supports full re-index or incremental mode. ' +
+    'Use incrementalOnly=true to only process files changed since last index.',
+  inputSchema: {
+    type: 'object',
+    properties: {
+      reason: {
+        type: 'string',
+        description: 'Reason for refreshing the index (for logging)'
+      },
+      incrementalOnly: {
+        type: 'boolean',
+        description:
+          'If true, only re-index files changed since last full index (faster). Default: false (full re-index)'
+      }
+    }
+  }
+};
+
+export async function handle(
+  args: Record<string, unknown>,
+  ctx: ToolContext
+): Promise<ToolResponse> {
+  const { reason, incrementalOnly } = args as { reason?: string; incrementalOnly?: boolean };
+
+  const mode = incrementalOnly ? 'incremental' : 'full';
+  console.error(`Refresh requested (${mode}): ${reason || 'Manual trigger'}`);
+
+  ctx.performIndexing(incrementalOnly);
+
+  return {
+    content: [
+      {
+        type: 'text',
+        text: JSON.stringify(
+          {
+            status: 'started',
+            mode,
+            message: incrementalOnly
+              ? 'Incremental re-indexing started. Only changed files will be re-embedded.'
+              : 'Full re-indexing started. 
Check status with get_indexing_status.', + reason + }, + null, + 2 + ) + } + ] + }; +} diff --git a/src/tools/remember.ts b/src/tools/remember.ts new file mode 100644 index 0000000..030e997 --- /dev/null +++ b/src/tools/remember.ts @@ -0,0 +1,133 @@ +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import type { ToolContext, ToolResponse } from './types.js'; +import type { Memory, MemoryCategory, MemoryType } from '../types/index.js'; +import { appendMemoryFile } from '../memory/store.js'; + +export const definition: Tool = { + name: 'remember', + description: + 'CALL IMMEDIATELY when user explicitly asks to remember/record something.\n\n' + + 'USER TRIGGERS:\n' + + '- "Remember this: [X]"\n' + + '- "Record this: [Y]"\n' + + '- "Save this for next time: [Z]"\n\n' + + 'DO NOT call unless user explicitly requests it.\n\n' + + 'HOW TO WRITE:\n' + + '- ONE convention per memory (if user lists 5 things, call this 5 times)\n' + + '- memory: 5-10 words (the specific rule)\n' + + '- reason: 1 sentence (why it matters)\n' + + '- Skip: one-time features, code examples, essays', + inputSchema: { + type: 'object', + properties: { + type: { + type: 'string', + enum: ['convention', 'decision', 'gotcha', 'failure'], + description: + 'Type of memory being recorded. Use "failure" for things that were tried and failed - ' + + 'prevents repeating the same mistakes.' 
+ }, + category: { + type: 'string', + description: 'Broader category for filtering', + enum: ['tooling', 'architecture', 'testing', 'dependencies', 'conventions'] + }, + memory: { + type: 'string', + description: 'What to remember (concise)' + }, + reason: { + type: 'string', + description: 'Why this matters or what breaks otherwise' + } + }, + required: ['type', 'category', 'memory', 'reason'] + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const args_typed = args as { + type?: MemoryType; + category: MemoryCategory; + memory: string; + reason: string; + }; + + const { type = 'decision', category, memory, reason } = args_typed; + + try { + const crypto = await import('crypto'); + const memoryPath = ctx.paths.memory; + + const hashContent = `${type}:${category}:${memory}:${reason}`; + const hash = crypto.createHash('sha256').update(hashContent).digest('hex'); + const id = hash.substring(0, 12); + + const newMemory: Memory = { + id, + type, + category, + memory, + reason, + date: new Date().toISOString() + }; + + const result = await appendMemoryFile(memoryPath, newMemory); + + if (result.status === 'duplicate') { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'info', + message: 'This memory was already recorded.', + memory: result.memory + }, + null, + 2 + ) + } + ] + }; + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + message: 'Memory recorded successfully.', + memory: result.memory + }, + null, + 2 + ) + } + ] + }; + } catch (error) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: 'Failed to record memory.', + error: error instanceof Error ? 
error.message : String(error) + }, + null, + 2 + ) + } + ] + }; + } +} diff --git a/src/tools/search-codebase.ts b/src/tools/search-codebase.ts new file mode 100644 index 0000000..8e8169b --- /dev/null +++ b/src/tools/search-codebase.ts @@ -0,0 +1,625 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ + +import type { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { promises as fs } from 'fs'; +import path from 'path'; +import type { ToolContext, ToolResponse } from './types.js'; +import { CodebaseSearcher } from '../core/search.js'; +import { buildEvidenceLock } from '../preflight/evidence-lock.js'; +import { shouldIncludePatternConflictCategory } from '../preflight/query-scope.js'; +import { + isComplementaryPatternCategory, + isComplementaryPatternConflict, + shouldSkipLegacyTestingFrameworkCategory +} from '../patterns/semantics.js'; +import { assessSearchQuality } from '../core/search-quality.js'; +import { IndexCorruptedError } from '../errors/index.js'; +import { readMemoriesFile, withConfidence } from '../memory/store.js'; +import { InternalFileGraph } from '../utils/usage-tracker.js'; + +export const definition: Tool = { + name: 'search_codebase', + description: + 'Search the indexed codebase. Returns ranked results and a searchQuality confidence summary. ' + + 'IMPORTANT: Pass the intent="edit"|"refactor"|"migrate" to get preflight: edit readiness check with evidence gating.', + inputSchema: { + type: 'object', + properties: { + query: { + type: 'string', + description: 'Natural language search query' + }, + intent: { + type: 'string', + enum: ['explore', 'edit', 'refactor', 'migrate'], + description: + 'Optional. Use "edit", "refactor", or "migrate" to get the full preflight card before making changes.' + }, + limit: { + type: 'number', + description: 'Maximum number of results to return (default: 5)', + default: 5 + }, + includeSnippets: { + type: 'boolean', + description: + 'Include code snippets in results (default: false). 
If you need code, prefer read_file instead.', + default: false + }, + filters: { + type: 'object', + description: 'Optional filters', + properties: { + framework: { + type: 'string', + description: 'Filter by framework (angular, react, vue)' + }, + language: { + type: 'string', + description: 'Filter by programming language' + }, + componentType: { + type: 'string', + description: 'Filter by component type (component, service, directive, etc.)' + }, + layer: { + type: 'string', + description: + 'Filter by architectural layer (presentation, business, data, state, core, shared)' + }, + tags: { + type: 'array', + items: { type: 'string' }, + description: 'Filter by tags' + } + } + } + }, + required: ['query'] + } +}; + +export async function handle( + args: Record, + ctx: ToolContext +): Promise { + const { query, limit, filters, intent, includeSnippets } = args as any; + const queryStr = typeof query === 'string' ? query.trim() : ''; + + if (!queryStr) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + errorCode: 'invalid_params', + message: "Invalid params: 'query' is required and must be a non-empty string.", + hint: "Provide a query like 'how are routes configured' or 'AlbumApiService'." + }, + null, + 2 + ) + } + ], + isError: true + }; + } + + if (ctx.indexState.status === 'indexing') { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'indexing', + message: 'Index is still being built. 
Retry in a moment.', + progress: ctx.indexState.indexer?.getProgress() + }, + null, + 2 + ) + } + ] + }; + } + + if (ctx.indexState.status === 'error') { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: `Indexing failed: ${ctx.indexState.error}` + }, + null, + 2 + ) + } + ] + }; + } + + const searcher = new CodebaseSearcher(ctx.rootPath); + let results: any[]; + const searchProfile = + intent && ['explore', 'edit', 'refactor', 'migrate'].includes(intent) + ? intent + : 'explore'; + + try { + results = await searcher.search(queryStr, limit || 5, filters, { + profile: searchProfile + }); + } catch (error) { + if (error instanceof IndexCorruptedError) { + console.error('[Auto-Heal] Index corrupted. Triggering full re-index...'); + + await ctx.performIndexing(); + + if (ctx.indexState.status === 'ready') { + console.error('[Auto-Heal] Success. Retrying search...'); + const freshSearcher = new CodebaseSearcher(ctx.rootPath); + try { + results = await freshSearcher.search(queryStr, limit || 5, filters, { + profile: searchProfile + }); + } catch (retryError) { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: `Auto-heal retry failed: ${ + retryError instanceof Error ? 
retryError.message : String(retryError) + }` + }, + null, + 2 + ) + } + ] + }; + } + } else { + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'error', + message: `Auto-heal failed: Indexing ended with status '${ctx.indexState.status}'`, + error: ctx.indexState.error + }, + null, + 2 + ) + } + ] + }; + } + } else { + throw error; // Propagate unexpected errors + } + } + + // Load memories for keyword matching, enriched with confidence + const allMemories = await readMemoriesFile(ctx.paths.memory); + const allMemoriesWithConf = withConfidence(allMemories); + + const queryTerms = queryStr.toLowerCase().split(/\s+/).filter(Boolean); + const relatedMemories = allMemoriesWithConf + .filter((m) => { + const searchText = `${m.memory} ${m.reason}`.toLowerCase(); + return queryTerms.some((term: string) => searchText.includes(term)); + }) + .sort((a, b) => b.effectiveConfidence - a.effectiveConfidence); + + // Load intelligence data for enrichment (all intents, not just preflight) + let intelligence: any = null; + try { + const intelligenceContent = await fs.readFile(ctx.paths.intelligence, 'utf-8'); + intelligence = JSON.parse(intelligenceContent); + } catch { + /* graceful degradation — intelligence file may not exist yet */ + } + + function computeIndexConfidence(): 'fresh' | 'aging' | 'stale' { + let confidence: 'fresh' | 'aging' | 'stale' = 'stale'; + if (intelligence?.generatedAt) { + const indexAge = Date.now() - new Date(intelligence.generatedAt).getTime(); + const hoursOld = indexAge / (1000 * 60 * 60); + if (hoursOld < 24) { + confidence = 'fresh'; + } else if (hoursOld < 168) { + confidence = 'aging'; + } + } + return confidence; + } + + // Cheap impact breadth estimate from the import graph (used for risk assessment). 
+ function computeImpactCandidates(resultPaths: string[]): string[] { + const impactCandidates: string[] = []; + if (!intelligence?.internalFileGraph?.imports) return impactCandidates; + const allImports = intelligence.internalFileGraph.imports as Record; + for (const [file, deps] of Object.entries(allImports)) { + if ( + deps.some((dep: string) => + resultPaths.some((rp) => dep.endsWith(rp) || rp.endsWith(dep)) + ) + ) { + if (!resultPaths.some((rp) => file.endsWith(rp) || rp.endsWith(file))) { + impactCandidates.push(file); + } + } + } + return impactCandidates; + } + + // Build reverse import map from intelligence graph + const reverseImports = new Map(); + if (intelligence?.internalFileGraph?.imports) { + for (const [file, deps] of Object.entries( + intelligence.internalFileGraph.imports + )) { + for (const dep of deps) { + if (!reverseImports.has(dep)) reverseImports.set(dep, []); + reverseImports.get(dep)!.push(file); + } + } + } + + // Enrich a search result with relationship data + function enrichResult(r: any): any | undefined { + const rPath = r.filePath; + + // importedBy: files that import this result (reverse lookup) + const importedBy: string[] = []; + for (const [dep, importers] of reverseImports) { + if (dep.endsWith(rPath) || rPath.endsWith(dep)) { + importedBy.push(...importers); + } + } + + // imports: files this result depends on (forward lookup) + const imports: string[] = []; + if (intelligence?.internalFileGraph?.imports) { + for (const [file, deps] of Object.entries( + intelligence.internalFileGraph.imports + )) { + if (file.endsWith(rPath) || rPath.endsWith(file)) { + imports.push(...deps); + } + } + } + + // testedIn: heuristic — same basename with .spec/.test extension + const testedIn: string[] = []; + const baseName = path.basename(rPath).replace(/\.[^.]+$/, ''); + if (intelligence?.internalFileGraph?.imports) { + for (const file of Object.keys(intelligence.internalFileGraph.imports)) { + const fileBase = path.basename(file); + if ( + 
(fileBase.includes('.spec.') || fileBase.includes('.test.')) && + fileBase.startsWith(baseName) + ) { + testedIn.push(file); + } + } + } + + // Only return if we have at least one piece of data + if (importedBy.length === 0 && imports.length === 0 && testedIn.length === 0) { + return undefined; + } + + return { + ...(importedBy.length > 0 && { importedBy }), + ...(imports.length > 0 && { imports }), + ...(testedIn.length > 0 && { testedIn }) + }; + } + + const searchQuality = assessSearchQuality(query, results); + + // Always-on edit preflight (lite): do not require intent and keep payload small. + let editPreflight: any = undefined; + if (intelligence && (!intent || intent === 'explore')) { + try { + const resultPaths = results.map((r) => r.filePath); + const impactCandidates = computeImpactCandidates(resultPaths); + + let riskLevel: 'low' | 'medium' | 'high' = 'low'; + if (impactCandidates.length > 10) { + riskLevel = 'high'; + } else if (impactCandidates.length > 3) { + riskLevel = 'medium'; + } + + // Use existing pattern intelligence for evidenceLock scoring, but keep the output payload lite. 
+ const preferredPatternsForEvidence: Array<{ pattern: string; example?: string }> = []; + const patterns = intelligence.patterns || {}; + for (const [_, data] of Object.entries(patterns)) { + if (data.primary) { + const p = data.primary; + if (p.trend === 'Rising' || p.trend === 'Stable') { + preferredPatternsForEvidence.push({ + pattern: p.name, + ...(p.canonicalExample && { example: p.canonicalExample.file }) + }); + } + } + } + + editPreflight = { + mode: 'lite', + riskLevel, + confidence: computeIndexConfidence(), + evidenceLock: buildEvidenceLock({ + results, + preferredPatterns: preferredPatternsForEvidence.slice(0, 5), + relatedMemories, + failureWarnings: [], + patternConflicts: [], + searchQualityStatus: searchQuality.status + }) + }; + } catch { + // editPreflight is best-effort - never fail search over it + } + } + + // Compose preflight card for edit/refactor/migrate intents + let preflight: any = undefined; + const preflightIntents = ['edit', 'refactor', 'migrate']; + if (intent && preflightIntents.includes(intent) && intelligence) { + try { + // --- Avoid / Prefer patterns --- + const avoidPatterns: any[] = []; + const preferredPatterns: any[] = []; + const patterns = intelligence.patterns || {}; + for (const [category, data] of Object.entries(patterns)) { + // Primary pattern = preferred if Rising or Stable + if (data.primary) { + const p = data.primary; + if (p.trend === 'Rising' || p.trend === 'Stable') { + preferredPatterns.push({ + pattern: p.name, + category, + adoption: p.frequency, + trend: p.trend, + guidance: p.guidance, + ...(p.canonicalExample && { example: p.canonicalExample.file }) + }); + } + } + // Also-detected patterns that are Declining = avoid + if (data.alsoDetected) { + for (const alt of data.alsoDetected) { + if (alt.trend === 'Declining') { + avoidPatterns.push({ + pattern: alt.name, + category, + adoption: alt.frequency, + trend: 'Declining', + guidance: alt.guidance + }); + } + } + } + } + + // --- Impact candidates (files 
importing the result files) --- + const resultPaths = results.map((r) => r.filePath); + const impactCandidates = computeImpactCandidates(resultPaths); + + // --- Risk level (based on circular deps + impact breadth) --- + let riskLevel: 'low' | 'medium' | 'high' = 'low'; + let cycleCount = 0; + if (intelligence.internalFileGraph) { + try { + const graph = InternalFileGraph.fromJSON(intelligence.internalFileGraph, ctx.rootPath); + // Use directory prefixes as scope (not full file paths) + // findCycles(scope) filters files by startsWith, so a full path would only match itself + const scopes = new Set( + resultPaths.map((rp) => { + const lastSlash = rp.lastIndexOf('/'); + return lastSlash > 0 ? rp.substring(0, lastSlash + 1) : rp; + }) + ); + for (const scope of scopes) { + const cycles = graph.findCycles(scope); + cycleCount += cycles.length; + } + } catch { + // Graph reconstruction failed — skip cycle check + } + } + if (cycleCount > 0 || impactCandidates.length > 10) { + riskLevel = 'high'; + } else if (impactCandidates.length > 3) { + riskLevel = 'medium'; + } + + // --- Golden files (exemplar code) --- + const goldenFiles = (intelligence.goldenFiles || []).slice(0, 3).map((g: any) => ({ + file: g.file, + score: g.score + })); + + // --- Confidence (index freshness) --- + const confidence = computeIndexConfidence(); + + // --- Failure memories (1.5x relevance boost) --- + const failureWarnings = relatedMemories + .filter((m) => m.type === 'failure' && !m.stale) + .map((m) => ({ + memory: m.memory, + reason: m.reason, + confidence: m.effectiveConfidence + })) + .slice(0, 3); + + const preferredPatternsForOutput = preferredPatterns.slice(0, 5); + const avoidPatternsForOutput = avoidPatterns.slice(0, 5); + + // --- Pattern conflicts (split decisions within categories) --- + const patternConflicts: Array<{ + category: string; + primary: { name: string; adoption: string }; + alternative: { name: string; adoption: string }; + }> = []; + const hasUnitTestFramework = 
Boolean((patterns as any).unitTestFramework?.primary); + for (const [cat, data] of Object.entries(patterns)) { + if (shouldSkipLegacyTestingFrameworkCategory(cat, patterns as any)) continue; + if (!shouldIncludePatternConflictCategory(cat, query)) continue; + if (!data.primary || !data.alsoDetected?.length) continue; + const primaryFreq = parseFloat(data.primary.frequency) || 100; + if (primaryFreq >= 80) continue; + for (const alt of data.alsoDetected) { + const altFreq = parseFloat(alt.frequency) || 0; + if (altFreq >= 20) { + if (isComplementaryPatternConflict(cat, data.primary.name, alt.name)) continue; + if (hasUnitTestFramework && cat === 'testingFramework') continue; + patternConflicts.push({ + category: cat, + primary: { name: data.primary.name, adoption: data.primary.frequency }, + alternative: { name: alt.name, adoption: alt.frequency } + }); + } + } + } + + const evidenceLock = buildEvidenceLock({ + results, + preferredPatterns: preferredPatternsForOutput, + relatedMemories, + failureWarnings, + patternConflicts, + searchQualityStatus: searchQuality.status + }); + + // Bump risk if there are active failure memories for this area + if (failureWarnings.length > 0 && riskLevel === 'low') { + riskLevel = 'medium'; + } + + // If evidence triangulation is weak, avoid claiming low risk + if (evidenceLock.status === 'block' && riskLevel === 'low') { + riskLevel = 'medium'; + } + + // If epistemic stress says abstain, bump risk + if (evidenceLock.epistemicStress?.abstain && riskLevel === 'low') { + riskLevel = 'medium'; + } + + preflight = { + intent, + riskLevel, + confidence, + evidenceLock, + ...(preferredPatternsForOutput.length > 0 && { + preferredPatterns: preferredPatternsForOutput + }), + ...(avoidPatternsForOutput.length > 0 && { + avoidPatterns: avoidPatternsForOutput + }), + ...(goldenFiles.length > 0 && { goldenFiles }), + ...(impactCandidates.length > 0 && { + impactCandidates: impactCandidates.slice(0, 10) + }), + ...(cycleCount > 0 && { 
circularDependencies: cycleCount }), + ...(failureWarnings.length > 0 && { failureWarnings }) + }; + } catch { + // Preflight construction failed — skip preflight, don't fail the search + } + } + + // For edit/refactor/migrate: return full preflight card (risk, patterns, impact, etc.). + // For explore or lite-only: return flattened { ready, reason }. + let preflightPayload: + | { ready: boolean; reason?: string } + | Record + | undefined; + if (preflight) { + const el = preflight.evidenceLock; + // Full card per tool schema; add top-level ready/reason for backward compatibility + preflightPayload = { + ...preflight, + ready: el?.readyToEdit ?? false, + ...(el && !el.readyToEdit && el.nextAction && { reason: el.nextAction }) + }; + } else if (editPreflight) { + const el = editPreflight.evidenceLock; + preflightPayload = { + ready: el?.readyToEdit ?? false, + ...(el && !el.readyToEdit && el.nextAction && { reason: el.nextAction }) + }; + } + + return { + content: [ + { + type: 'text', + text: JSON.stringify( + { + status: 'success', + searchQuality: { + status: searchQuality.status, + confidence: searchQuality.confidence, + ...(searchQuality.status === 'low_confidence' && + searchQuality.nextSteps?.[0] && { + hint: searchQuality.nextSteps[0] + }) + }, + ...(preflightPayload && { preflight: preflightPayload }), + results: results.map((r) => { + const relationships = enrichResult(r); + // Condensed relationships: importedBy count + hasTests flag + const condensedRel = relationships + ? 
{
+                    ...(relationships.importedBy &&
+                      relationships.importedBy.length > 0 && {
+                        importedByCount: relationships.importedBy.length
+                      }),
+                    ...(relationships.testedIn &&
+                      relationships.testedIn.length > 0 && { hasTests: true })
+                  }
+                : undefined;
+              const hasCondensedRel = condensedRel && Object.keys(condensedRel).length > 0;
+
+              return {
+                file: `${r.filePath}:${r.startLine}-${r.endLine}`,
+                summary: r.summary,
+                score: Math.round(r.score * 100) / 100,
+                ...(r.componentType && r.layer && { type: `${r.componentType}:${r.layer}` }),
+                ...(r.trend && r.trend !== 'Stable' && { trend: r.trend }),
+                ...(r.patternWarning && { patternWarning: r.patternWarning }),
+                ...(hasCondensedRel && { relationships: condensedRel }),
+                ...(includeSnippets && r.snippet && { snippet: r.snippet })
+              };
+            }),
+            totalResults: results.length,
+            ...(relatedMemories.length > 0 && {
+              relatedMemories: relatedMemories
+                .slice(0, 3)
+                .map((m) => `${m.memory} (${m.effectiveConfidence})`)
+            })
+          },
+          null,
+          2
+        )
+      }
+    ]
+  };
+}
diff --git a/src/tools/types.ts b/src/tools/types.ts
new file mode 100644
index 0000000..9b6b2b1
--- /dev/null
+++ b/src/tools/types.ts
@@ -0,0 +1,31 @@
+import type { CodebaseIndexer } from '../core/indexer.js';
+import type { IndexingStats } from '../types/index.js';
+
+export interface ToolPaths {
+  baseDir: string;
+  memory: string;
+  intelligence: string;
+  keywordIndex: string;
+  vectorDb: string;
+}
+
+export interface IndexState {
+  status: 'idle' | 'indexing' | 'ready' | 'error';
+  lastIndexed?: Date;
+  stats?: IndexingStats;
+  error?: string;
+  indexer?: CodebaseIndexer;
+}
+
+export interface ToolContext {
+  indexState: IndexState;
+  paths: ToolPaths;
+  rootPath: string;
+  performIndexing: (incrementalOnly?: boolean) => void;
+}
+
+export interface ToolResponse {
+  content?: Array<{ type: 'text'; text: string }>;
+  isError?: boolean;
+  [key: string]: unknown;
+}
diff --git a/tests/tools/dispatch.test.ts b/tests/tools/dispatch.test.ts
new file mode 100644
index
0000000..62b677b
--- /dev/null
+++ b/tests/tools/dispatch.test.ts
@@ -0,0 +1,84 @@
+import { describe, it, expect } from 'vitest';
+import { TOOLS, dispatchTool } from '../../src/tools/index.js';
+import type { ToolContext } from '../../src/tools/types.js';
+
+describe('Tool Dispatch', () => {
+  it('exports all 11 tools', () => {
+    expect(TOOLS.length).toBe(11);
+    expect(TOOLS.map((t) => t.name)).toEqual([
+      'search_codebase',
+      'get_codebase_metadata',
+      'get_indexing_status',
+      'refresh_index',
+      'get_style_guide',
+      'get_team_patterns',
+      'get_symbol_references',
+      'get_component_usage',
+      'detect_circular_dependencies',
+      'remember',
+      'get_memory'
+    ]);
+  });
+
+  it('has unique tool names', () => {
+    const names = TOOLS.map((t) => t.name);
+    expect(new Set(names).size).toBe(names.length);
+  });
+
+  it('all tools have descriptions', () => {
+    TOOLS.forEach((tool) => {
+      expect(tool.description).toBeTruthy();
+      expect(typeof tool.description).toBe('string');
+    });
+  });
+
+  it('all tools have inputSchema', () => {
+    TOOLS.forEach((tool) => {
+      expect(tool.inputSchema).toBeDefined();
+      expect(tool.inputSchema.type).toBe('object');
+    });
+  });
+
+  it('dispatchTool returns error for unknown tool', async () => {
+    const mockCtx: ToolContext = {
+      indexState: { status: 'idle' },
+      paths: {
+        baseDir: '/tmp',
+        memory: '/tmp/memory.jsonl',
+        intelligence: '/tmp/intelligence.json',
+        keywordIndex: '/tmp/index.json',
+        vectorDb: '/tmp/vector-db'
+      },
+      rootPath: '/tmp',
+      performIndexing: () => undefined
+    };
+
+    const result = await dispatchTool('unknown_tool', {}, mockCtx);
+
+    expect(result.isError).toBe(true);
+    expect(result.content).toBeDefined();
+    expect(result.content![0].text).toContain('Unknown tool');
+  });
+
+  it('dispatchTool routes to correct handlers', async () => {
+    const mockCtx: ToolContext = {
+      indexState: { status: 'idle' },
+      paths: {
+        baseDir: '/tmp',
+        memory: '/tmp/memory.jsonl',
+        intelligence: '/tmp/intelligence.json',
+        keywordIndex: '/tmp/index.json',
+        vectorDb: '/tmp/vector-db'
+      },
+      rootPath: '/tmp',
+      performIndexing: () => undefined
+    };
+
+    // Test get_indexing_status (simplest handler without file I/O)
+    const result = await dispatchTool('get_indexing_status', {}, mockCtx);
+    expect(result.content).toBeDefined();
+    expect(result.content![0].type).toBe('text');
+    const text = result.content![0].text;
+    expect(text).toContain('idle');
+  });
+});