11/**
2- * DCP Context command handler.
2+ * DCP Context Command
33 * Shows a visual breakdown of token usage in the current session.
4+ *
5+ * TOKEN CALCULATION STRATEGY
6+ * ==========================
7+ * We minimize tokenizer estimation by leveraging API-reported values wherever possible.
8+ *
9+ * WHAT WE GET FROM THE API (exact):
10+ * - tokens.input : Input tokens for each assistant response
11+ * - tokens.output : Output tokens generated (includes text + tool calls)
12+ * - tokens.reasoning: Reasoning tokens used
13+ * - tokens.cache : Cache read/write tokens
14+ *
15+ * HOW WE CALCULATE EACH CATEGORY:
16+ *
17+ * SYSTEM = max(0, firstAssistant.input + cache.read - tokenizer(firstUserMessage))
18+ * The first response's input covers the system prompt plus the first user message.
19+ *
20+ * TOOLS = max(0, tokenizer(toolInputs + toolOutputs) - prunedTokens)
21+ * We must tokenize tools anyway for pruning decisions.
22+ *
23+ * USER = tokenizer(all user messages)
24+ * User messages are typically small, so estimation is acceptable.
25+ *
26+ * ASSISTANT = max(0, total - system - user - tools)
27+ * Calculated as residual. This absorbs:
28+ * - Assistant text output tokens
29+ * - Reasoning tokens (if persisted by the model)
30+ * - Any estimation errors
31+ *
32+ * TOTAL = input + output + reasoning + cache.read + cache.write
33+ * Matches opencode's UI display.
34+ *
35+ * WHY ASSISTANT IS THE RESIDUAL:
36+ * If reasoning tokens persist in context (model-dependent), they semantically
37+ * belong with "Assistant" since reasoning IS assistant-generated content.
438 */
539
640import type { Logger } from "../logger"
@@ -24,9 +58,10 @@ interface TokenBreakdown {
2458 system : number
2559 user : number
2660 assistant : number
27- reasoning : number
2861 tools : number
29- pruned : number
62+ toolCount : number
63+ prunedTokens : number
64+ prunedCount : number
3065 total : number
3166}
3267
@@ -35,9 +70,10 @@ function analyzeTokens(state: SessionState, messages: WithParts[]): TokenBreakdo
3570 system : 0 ,
3671 user : 0 ,
3772 assistant : 0 ,
38- reasoning : 0 ,
3973 tools : 0 ,
40- pruned : state . stats . totalPruneTokens ,
74+ toolCount : 0 ,
75+ prunedTokens : state . stats . totalPruneTokens ,
76+ prunedCount : state . prune . toolIds . length ,
4177 total : 0 ,
4278 }
4379
@@ -52,26 +88,6 @@ function analyzeTokens(state: SessionState, messages: WithParts[]): TokenBreakdo
5288 }
5389 }
5490
55- let firstUserTokens = 0
56- for ( const msg of messages ) {
57- if ( msg . info . role === "user" && ! isIgnoredUserMessage ( msg ) ) {
58- for ( const part of msg . parts ) {
59- if ( part . type === "text" ) {
60- const textPart = part as TextPart
61- firstUserTokens += countTokens ( textPart . text || "" )
62- }
63- }
64- break
65- }
66- }
67-
68- // Calculate system tokens: first response's total input minus first user message
69- if ( firstAssistant ) {
70- const firstInput =
71- ( firstAssistant . tokens ?. input || 0 ) + ( firstAssistant . tokens ?. cache ?. read || 0 )
72- breakdown . system = Math . max ( 0 , firstInput - firstUserTokens )
73- }
74-
7591 let lastAssistant : AssistantMessage | undefined
7692 for ( let i = messages . length - 1 ; i >= 0 ; i -- ) {
7793 const msg = messages [ i ]
@@ -84,71 +100,74 @@ function analyzeTokens(state: SessionState, messages: WithParts[]): TokenBreakdo
84100 }
85101 }
86102
87- // Get total from API
88- // Total = input + output + reasoning + cache.read + cache.write
89103 const apiInput = lastAssistant ?. tokens ?. input || 0
90104 const apiOutput = lastAssistant ?. tokens ?. output || 0
91105 const apiReasoning = lastAssistant ?. tokens ?. reasoning || 0
92106 const apiCacheRead = lastAssistant ?. tokens ?. cache ?. read || 0
93107 const apiCacheWrite = lastAssistant ?. tokens ?. cache ?. write || 0
94- const apiTotal = apiInput + apiOutput + apiReasoning + apiCacheRead + apiCacheWrite
95-
96- for ( const msg of messages ) {
97- if ( isMessageCompacted ( state , msg ) ) {
98- continue
99- }
108+ breakdown . total = apiInput + apiOutput + apiReasoning + apiCacheRead + apiCacheWrite
100109
101- if ( msg . info . role === "user" && isIgnoredUserMessage ( msg ) ) {
102- continue
103- }
110+ const userTextParts : string [ ] = [ ]
111+ const toolInputParts : string [ ] = [ ]
112+ const toolOutputParts : string [ ] = [ ]
113+ let firstUserText = ""
114+ let foundFirstUser = false
104115
105- const info = msg . info
106- const role = info . role
116+ for ( const msg of messages ) {
117+ if ( isMessageCompacted ( state , msg ) ) continue
118+ if ( msg . info . role === "user" && isIgnoredUserMessage ( msg ) ) continue
107119
108120 for ( const part of msg . parts ) {
109- switch ( part . type ) {
110- case "text" : {
111- const textPart = part as TextPart
112- const tokens = countTokens ( textPart . text || "" )
113- if ( role === "user" ) {
114- breakdown . user += tokens
115- } else {
116- breakdown . assistant += tokens
117- }
118- break
121+ if ( part . type === "text" && msg . info . role === "user" ) {
122+ const textPart = part as TextPart
123+ const text = textPart . text || ""
124+ userTextParts . push ( text )
125+ if ( ! foundFirstUser ) {
126+ firstUserText += text
119127 }
120- case "tool" : {
121- const toolPart = part as ToolPart
122-
123- if ( toolPart . state ?. input ) {
124- const inputStr =
125- typeof toolPart . state . input === "string"
126- ? toolPart . state . input
127- : JSON . stringify ( toolPart . state . input )
128- breakdown . tools += countTokens ( inputStr )
129- }
130-
131- if ( toolPart . state ?. status === "completed" && toolPart . state ?. output ) {
132- const outputStr =
133- typeof toolPart . state . output === "string"
134- ? toolPart . state . output
135- : JSON . stringify ( toolPart . state . output )
136- breakdown . tools += countTokens ( outputStr )
137- }
138- break
128+ } else if ( part . type === "tool" ) {
129+ const toolPart = part as ToolPart
130+ breakdown . toolCount ++
131+
132+ if ( toolPart . state ?. input ) {
133+ const inputStr =
134+ typeof toolPart . state . input === "string"
135+ ? toolPart . state . input
136+ : JSON . stringify ( toolPart . state . input )
137+ toolInputParts . push ( inputStr )
138+ }
139+
140+ if ( toolPart . state ?. status === "completed" && toolPart . state ?. output ) {
141+ const outputStr =
142+ typeof toolPart . state . output === "string"
143+ ? toolPart . state . output
144+ : JSON . stringify ( toolPart . state . output )
145+ toolOutputParts . push ( outputStr )
139146 }
140147 }
141148 }
149+
150+ if ( msg . info . role === "user" && ! isIgnoredUserMessage ( msg ) && ! foundFirstUser ) {
151+ foundFirstUser = true
152+ }
142153 }
143154
144- breakdown . tools = Math . max ( 0 , breakdown . tools - breakdown . pruned )
155+ const firstUserTokens = countTokens ( firstUserText )
156+ breakdown . user = countTokens ( userTextParts . join ( "\n" ) )
157+ const toolInputTokens = countTokens ( toolInputParts . join ( "\n" ) )
158+ const toolOutputTokens = countTokens ( toolOutputParts . join ( "\n" ) )
145159
146- // Calculate reasoning as the difference between API total and our counted parts
147- // This handles both interleaved thinking and non-interleaved models correctly
148- const countedParts = breakdown . system + breakdown . user + breakdown . assistant + breakdown . tools
149- breakdown . reasoning = Math . max ( 0 , apiTotal - countedParts )
160+ if ( firstAssistant ) {
161+ const firstInput =
162+ ( firstAssistant . tokens ?. input || 0 ) + ( firstAssistant . tokens ?. cache ?. read || 0 )
163+ breakdown . system = Math . max ( 0 , firstInput - firstUserTokens )
164+ }
150165
151- breakdown . total = apiTotal
166+ breakdown . tools = Math . max ( 0 , toolInputTokens + toolOutputTokens - breakdown . prunedTokens )
167+ breakdown . assistant = Math . max (
168+ 0 ,
169+ breakdown . total - breakdown . system - breakdown . user - breakdown . tools ,
170+ )
152171
153172 return breakdown
154173}
@@ -164,23 +183,18 @@ function formatContextMessage(breakdown: TokenBreakdown): string {
164183 const lines : string [ ] = [ ]
165184 const barWidth = 30
166185
167- const values = [
168- breakdown . system ,
169- breakdown . user ,
170- breakdown . assistant ,
171- breakdown . reasoning ,
172- breakdown . tools ,
173- ]
174- const maxValue = Math . max ( ...values )
186+ const toolsInContext = breakdown . toolCount - breakdown . prunedCount
187+ const toolsLabel = `Tools (${ toolsInContext } )`
175188
176189 const categories = [
177190 { label : "System" , value : breakdown . system , char : "█" } ,
178191 { label : "User" , value : breakdown . user , char : "▓" } ,
179192 { label : "Assistant" , value : breakdown . assistant , char : "▒" } ,
180- { label : "Reasoning" , value : breakdown . reasoning , char : "░" } ,
181- { label : "Tools" , value : breakdown . tools , char : "⣿" } ,
193+ { label : toolsLabel , value : breakdown . tools , char : "░" } ,
182194 ] as const
183195
196+ const maxLabelLen = Math . max ( ...categories . map ( ( c ) => c . label . length ) )
197+
184198 lines . push ( "╭───────────────────────────────────────────────────────────╮" )
185199 lines . push ( "│ DCP Context Analysis │" )
186200 lines . push ( "╰───────────────────────────────────────────────────────────╯" )
@@ -190,10 +204,10 @@ function formatContextMessage(breakdown: TokenBreakdown): string {
190204 lines . push ( "" )
191205
192206 for ( const cat of categories ) {
193- const bar = createBar ( cat . value , maxValue , barWidth , cat . char )
207+ const bar = createBar ( cat . value , breakdown . total , barWidth , cat . char )
194208 const percentage =
195209 breakdown . total > 0 ? ( ( cat . value / breakdown . total ) * 100 ) . toFixed ( 1 ) : "0.0"
196- const labelWithPct = `${ cat . label . padEnd ( 9 ) } ${ percentage . padStart ( 5 ) } % `
210+ const labelWithPct = `${ cat . label . padEnd ( maxLabelLen ) } ${ percentage . padStart ( 5 ) } % `
197211 const valueStr = formatTokenCount ( cat . value ) . padStart ( 13 )
198212 lines . push ( `${ labelWithPct } │${ bar . padEnd ( barWidth ) } │${ valueStr } ` )
199213 }
@@ -204,12 +218,12 @@ function formatContextMessage(breakdown: TokenBreakdown): string {
204218
205219 lines . push ( "Summary:" )
206220
207- if ( breakdown . pruned > 0 ) {
208- const withoutPruning = breakdown . total + breakdown . pruned
209- const savingsPercent = ( ( breakdown . pruned / withoutPruning ) * 100 ) . toFixed ( 1 )
221+ if ( breakdown . prunedTokens > 0 ) {
222+ const withoutPruning = breakdown . total + breakdown . prunedTokens
210223 lines . push (
211- ` Current context: ~ ${ formatTokenCount ( breakdown . total ) } ( ${ savingsPercent } % saved )` ,
224+ ` Pruned: ${ breakdown . prunedCount } tools (~ ${ formatTokenCount ( breakdown . prunedTokens ) } )` ,
212225 )
226+ lines . push ( ` Current context: ~${ formatTokenCount ( breakdown . total ) } ` )
213227 lines . push ( ` Without DCP: ~${ formatTokenCount ( withoutPruning ) } ` )
214228 } else {
215229 lines . push ( ` Current context: ~${ formatTokenCount ( breakdown . total ) } ` )
0 commit comments