|
| 1 | +/** |
| 2 | + * DCP Context command handler. |
| 3 | + * Shows a visual breakdown of token usage in the current session. |
| 4 | + */ |
| 5 | + |
| 6 | +import type { Logger } from "../logger" |
| 7 | +import type { SessionState, WithParts } from "../state" |
| 8 | +import { sendIgnoredMessage } from "../ui/notification" |
| 9 | +import { formatTokenCount } from "../ui/utils" |
| 10 | +import { isMessageCompacted } from "../shared-utils" |
| 11 | +import { isIgnoredUserMessage } from "../messages/utils" |
| 12 | +import { countTokens, getCurrentParams } from "../strategies/utils" |
| 13 | +import type { AssistantMessage, TextPart, ToolPart } from "@opencode-ai/sdk/v2" |
| 14 | + |
/** Dependencies handed to {@link handleContextCommand}. */
export interface ContextCommandContext {
  // NOTE(review): `any` — presumably the opencode SDK client used by
  // sendIgnoredMessage; a concrete client type would be preferable. TODO confirm.
  client: any
  /** Session state; supplies `stats.totalPruneTokens` for the report. */
  state: SessionState
  logger: Logger
  /** Session the report is analyzed for and posted back to. */
  sessionId: string
  /** All session messages (with parts) to analyze. */
  messages: WithParts[]
}
| 22 | + |
/** Per-category token counts; values other than `total` are estimates. */
interface TokenBreakdown {
  system: number // derived: first response's input minus first user message
  user: number // text parts of non-ignored, non-compacted user messages
  assistant: number // text parts of assistant messages
  reasoning: number // derived: API total minus all locally counted parts
  tools: number // tool inputs + completed outputs, minus pruned tokens
  pruned: number // cumulative tokens removed by pruning (from session stats)
  total: number // API-reported total from the latest assistant message
}
| 32 | + |
| 33 | +function analyzeTokens(state: SessionState, messages: WithParts[]): TokenBreakdown { |
| 34 | + const breakdown: TokenBreakdown = { |
| 35 | + system: 0, |
| 36 | + user: 0, |
| 37 | + assistant: 0, |
| 38 | + reasoning: 0, |
| 39 | + tools: 0, |
| 40 | + pruned: state.stats.totalPruneTokens, |
| 41 | + total: 0, |
| 42 | + } |
| 43 | + |
| 44 | + let firstAssistant: AssistantMessage | undefined |
| 45 | + for (const msg of messages) { |
| 46 | + if (msg.info.role === "assistant") { |
| 47 | + const assistantInfo = msg.info as AssistantMessage |
| 48 | + if (assistantInfo.tokens?.input > 0 || assistantInfo.tokens?.cache?.read > 0) { |
| 49 | + firstAssistant = assistantInfo |
| 50 | + break |
| 51 | + } |
| 52 | + } |
| 53 | + } |
| 54 | + |
| 55 | + let firstUserTokens = 0 |
| 56 | + for (const msg of messages) { |
| 57 | + if (msg.info.role === "user" && !isIgnoredUserMessage(msg)) { |
| 58 | + for (const part of msg.parts) { |
| 59 | + if (part.type === "text") { |
| 60 | + const textPart = part as TextPart |
| 61 | + firstUserTokens += countTokens(textPart.text || "") |
| 62 | + } |
| 63 | + } |
| 64 | + break |
| 65 | + } |
| 66 | + } |
| 67 | + |
| 68 | + // Calculate system tokens: first response's total input minus first user message |
| 69 | + if (firstAssistant) { |
| 70 | + const firstInput = |
| 71 | + (firstAssistant.tokens?.input || 0) + (firstAssistant.tokens?.cache?.read || 0) |
| 72 | + breakdown.system = Math.max(0, firstInput - firstUserTokens) |
| 73 | + } |
| 74 | + |
| 75 | + let lastAssistant: AssistantMessage | undefined |
| 76 | + for (let i = messages.length - 1; i >= 0; i--) { |
| 77 | + const msg = messages[i] |
| 78 | + if (msg.info.role === "assistant") { |
| 79 | + const assistantInfo = msg.info as AssistantMessage |
| 80 | + if (assistantInfo.tokens?.output > 0) { |
| 81 | + lastAssistant = assistantInfo |
| 82 | + break |
| 83 | + } |
| 84 | + } |
| 85 | + } |
| 86 | + |
| 87 | + // Get total from API |
| 88 | + // Total = input + output + reasoning + cache.read + cache.write |
| 89 | + const apiInput = lastAssistant?.tokens?.input || 0 |
| 90 | + const apiOutput = lastAssistant?.tokens?.output || 0 |
| 91 | + const apiReasoning = lastAssistant?.tokens?.reasoning || 0 |
| 92 | + const apiCacheRead = lastAssistant?.tokens?.cache?.read || 0 |
| 93 | + const apiCacheWrite = lastAssistant?.tokens?.cache?.write || 0 |
| 94 | + const apiTotal = apiInput + apiOutput + apiReasoning + apiCacheRead + apiCacheWrite |
| 95 | + |
| 96 | + for (const msg of messages) { |
| 97 | + if (isMessageCompacted(state, msg)) { |
| 98 | + continue |
| 99 | + } |
| 100 | + |
| 101 | + if (msg.info.role === "user" && isIgnoredUserMessage(msg)) { |
| 102 | + continue |
| 103 | + } |
| 104 | + |
| 105 | + const info = msg.info |
| 106 | + const role = info.role |
| 107 | + |
| 108 | + for (const part of msg.parts) { |
| 109 | + switch (part.type) { |
| 110 | + case "text": { |
| 111 | + const textPart = part as TextPart |
| 112 | + const tokens = countTokens(textPart.text || "") |
| 113 | + if (role === "user") { |
| 114 | + breakdown.user += tokens |
| 115 | + } else { |
| 116 | + breakdown.assistant += tokens |
| 117 | + } |
| 118 | + break |
| 119 | + } |
| 120 | + case "tool": { |
| 121 | + const toolPart = part as ToolPart |
| 122 | + |
| 123 | + if (toolPart.state?.input) { |
| 124 | + const inputStr = |
| 125 | + typeof toolPart.state.input === "string" |
| 126 | + ? toolPart.state.input |
| 127 | + : JSON.stringify(toolPart.state.input) |
| 128 | + breakdown.tools += countTokens(inputStr) |
| 129 | + } |
| 130 | + |
| 131 | + if (toolPart.state?.status === "completed" && toolPart.state?.output) { |
| 132 | + const outputStr = |
| 133 | + typeof toolPart.state.output === "string" |
| 134 | + ? toolPart.state.output |
| 135 | + : JSON.stringify(toolPart.state.output) |
| 136 | + breakdown.tools += countTokens(outputStr) |
| 137 | + } |
| 138 | + break |
| 139 | + } |
| 140 | + } |
| 141 | + } |
| 142 | + } |
| 143 | + |
| 144 | + breakdown.tools = Math.max(0, breakdown.tools - breakdown.pruned) |
| 145 | + |
| 146 | + // Calculate reasoning as the difference between API total and our counted parts |
| 147 | + // This handles both interleaved thinking and non-interleaved models correctly |
| 148 | + const countedParts = breakdown.system + breakdown.user + breakdown.assistant + breakdown.tools |
| 149 | + breakdown.reasoning = Math.max(0, apiTotal - countedParts) |
| 150 | + |
| 151 | + breakdown.total = apiTotal |
| 152 | + |
| 153 | + return breakdown |
| 154 | +} |
| 155 | + |
| 156 | +function createBar(value: number, maxValue: number, width: number, char: string = "█"): string { |
| 157 | + if (maxValue === 0) return "" |
| 158 | + const filled = Math.round((value / maxValue) * width) |
| 159 | + const bar = char.repeat(Math.max(0, filled)) |
| 160 | + return bar |
| 161 | +} |
| 162 | + |
| 163 | +function formatContextMessage(breakdown: TokenBreakdown): string { |
| 164 | + const lines: string[] = [] |
| 165 | + const barWidth = 30 |
| 166 | + |
| 167 | + const values = [ |
| 168 | + breakdown.system, |
| 169 | + breakdown.user, |
| 170 | + breakdown.assistant, |
| 171 | + breakdown.reasoning, |
| 172 | + breakdown.tools, |
| 173 | + breakdown.pruned, |
| 174 | + ] |
| 175 | + const maxValue = Math.max(...values) |
| 176 | + |
| 177 | + const categories = [ |
| 178 | + { label: "System", value: breakdown.system, char: "█" }, |
| 179 | + { label: "User", value: breakdown.user, char: "▓" }, |
| 180 | + { label: "Assistant", value: breakdown.assistant, char: "▒" }, |
| 181 | + { label: "Reasoning", value: breakdown.reasoning, char: "░" }, |
| 182 | + { label: "Tools", value: breakdown.tools, char: "▓" }, |
| 183 | + { label: "Pruned", value: breakdown.pruned, char: "⣿", isSaved: true }, |
| 184 | + ] as const |
| 185 | + |
| 186 | + lines.push("╭───────────────────────────────────────────────────────────╮") |
| 187 | + lines.push("│ DCP Context Analysis │") |
| 188 | + lines.push("╰───────────────────────────────────────────────────────────╯") |
| 189 | + lines.push("") |
| 190 | + lines.push("Session Context Breakdown:") |
| 191 | + lines.push("─".repeat(60)) |
| 192 | + lines.push("") |
| 193 | + |
| 194 | + for (const cat of categories) { |
| 195 | + const bar = createBar(cat.value, maxValue, barWidth, cat.char) |
| 196 | + |
| 197 | + let labelWithPct: string |
| 198 | + let valueStr: string |
| 199 | + if ("isSaved" in cat && cat.isSaved) { |
| 200 | + labelWithPct = cat.label.padEnd(16) |
| 201 | + valueStr = `${formatTokenCount(cat.value).replace(" tokens", "").padStart(6)} saved` |
| 202 | + } else { |
| 203 | + const percentage = |
| 204 | + breakdown.total > 0 ? ((cat.value / breakdown.total) * 100).toFixed(1) : "0.0" |
| 205 | + labelWithPct = `${cat.label.padEnd(9)} ${percentage.padStart(5)}%` |
| 206 | + valueStr = formatTokenCount(cat.value).padStart(13) |
| 207 | + } |
| 208 | + |
| 209 | + lines.push(`${labelWithPct}│${bar.padEnd(barWidth)}│${valueStr}`) |
| 210 | + } |
| 211 | + |
| 212 | + lines.push("") |
| 213 | + lines.push("─".repeat(60)) |
| 214 | + lines.push("") |
| 215 | + |
| 216 | + lines.push("Summary:") |
| 217 | + |
| 218 | + if (breakdown.pruned > 0) { |
| 219 | + const withoutPruning = breakdown.total + breakdown.pruned |
| 220 | + const savingsPercent = ((breakdown.pruned / withoutPruning) * 100).toFixed(1) |
| 221 | + lines.push( |
| 222 | + ` Current context: ~${formatTokenCount(breakdown.total)} (${savingsPercent}% saved)`, |
| 223 | + ) |
| 224 | + lines.push(` Without DCP: ~${formatTokenCount(withoutPruning)}`) |
| 225 | + } else { |
| 226 | + lines.push(` Current context: ~${formatTokenCount(breakdown.total)}`) |
| 227 | + } |
| 228 | + |
| 229 | + lines.push("") |
| 230 | + |
| 231 | + return lines.join("\n") |
| 232 | +} |
| 233 | + |
| 234 | +export async function handleContextCommand(ctx: ContextCommandContext): Promise<void> { |
| 235 | + const { client, state, logger, sessionId, messages } = ctx |
| 236 | + |
| 237 | + const breakdown = analyzeTokens(state, messages) |
| 238 | + |
| 239 | + const message = formatContextMessage(breakdown) |
| 240 | + |
| 241 | + const params = getCurrentParams(state, messages, logger) |
| 242 | + await sendIgnoredMessage(client, sessionId, message, params, logger) |
| 243 | +} |
0 commit comments