diff --git a/frontend/app/aipanel/aipanel-contextmenu.ts b/frontend/app/aipanel/aipanel-contextmenu.ts
index a783af7a4c..1aa4890cf4 100644
--- a/frontend/app/aipanel/aipanel-contextmenu.ts
+++ b/frontend/app/aipanel/aipanel-contextmenu.ts
@@ -3,7 +3,8 @@
import { waveAIHasSelection } from "@/app/aipanel/waveai-focus-utils";
import { ContextMenuModel } from "@/app/store/contextmenu";
-import { isDev } from "@/app/store/global";
+import { atoms, isDev } from "@/app/store/global";
+import { globalStore } from "@/app/store/jotaiStore";
import { RpcApi } from "@/app/store/wshclientapi";
import { TabRpcClient } from "@/app/store/wshrpcutil";
import { WaveAIModel } from "./waveai-model";
@@ -38,41 +39,47 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo
oref: model.orefContext,
});
- const currentThinkingLevel = rtInfo?.["waveai:thinkinglevel"] ?? "medium";
+ const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom);
+ const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
+ const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "balanced" : "quick");
const defaultTokens = model.inBuilder ? 24576 : 4096;
const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens;
- const thinkingLevelSubmenu: ContextMenuItem[] = [
+ const thinkingModeSubmenu: ContextMenuItem[] = [
{
- label: "Low",
+ label: "Quick (gpt-5-mini)",
type: "checkbox",
- checked: currentThinkingLevel === "low",
+ checked: currentThinkingMode === "quick",
click: () => {
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
- data: { "waveai:thinkinglevel": "low" },
+ data: { "waveai:thinkingmode": "quick" },
});
},
},
{
- label: "Medium",
+ label: hasPremium ? "Balanced (gpt-5, low thinking)" : "Balanced (premium)",
type: "checkbox",
- checked: currentThinkingLevel === "medium",
+ checked: currentThinkingMode === "balanced",
+ enabled: hasPremium,
click: () => {
+ if (!hasPremium) return;
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
- data: { "waveai:thinkinglevel": "medium" },
+ data: { "waveai:thinkingmode": "balanced" },
});
},
},
{
- label: "High",
+ label: hasPremium ? "Deep (gpt-5, full thinking)" : "Deep (premium)",
type: "checkbox",
- checked: currentThinkingLevel === "high",
+ checked: currentThinkingMode === "deep",
+ enabled: hasPremium,
click: () => {
+ if (!hasPremium) return;
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
- data: { "waveai:thinkinglevel": "high" },
+ data: { "waveai:thinkingmode": "deep" },
});
},
},
@@ -157,8 +164,8 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo
}
menu.push({
- label: "Thinking Level",
- submenu: thinkingLevelSubmenu,
+ label: "Thinking Mode",
+ submenu: thinkingModeSubmenu,
});
menu.push({
diff --git a/frontend/app/aipanel/aipanel.tsx b/frontend/app/aipanel/aipanel.tsx
index 7a2c7e3b11..12fe0da841 100644
--- a/frontend/app/aipanel/aipanel.tsx
+++ b/frontend/app/aipanel/aipanel.tsx
@@ -21,6 +21,7 @@ import { AIPanelInput } from "./aipanelinput";
import { AIPanelMessages } from "./aipanelmessages";
import { AIRateLimitStrip } from "./airatelimitstrip";
import { TelemetryRequiredMessage } from "./telemetryrequired";
+import { ThinkingLevelDropdown } from "./thinkingmode";
import { WaveAIModel } from "./waveai-model";
const AIBlockMask = memo(() => {
@@ -489,9 +490,12 @@ const AIPanelComponentInner = memo(() => {
<>
{messages.length === 0 && initialLoadDone ? (
handleWaveAIContextMenu(e, true)}
>
+
+
+
{model.inBuilder ?
:
}
) : (
diff --git a/frontend/app/aipanel/aipanelmessages.tsx b/frontend/app/aipanel/aipanelmessages.tsx
index 781ce73fd8..a6ef0538b5 100644
--- a/frontend/app/aipanel/aipanelmessages.tsx
+++ b/frontend/app/aipanel/aipanelmessages.tsx
@@ -4,6 +4,7 @@
import { useAtomValue } from "jotai";
import { memo, useEffect, useRef } from "react";
import { AIMessage } from "./aimessage";
+import { ThinkingLevelDropdown } from "./thinkingmode";
import { WaveAIModel } from "./waveai-model";
interface AIPanelMessagesProps {
@@ -41,7 +42,14 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane
}, [isPanelOpen]);
return (
-
+
+
+
+
{messages.map((message, index) => {
const isLastMessage = index === messages.length - 1;
const isStreaming = status === "streaming" && isLastMessage && message.role === "assistant";
diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx
new file mode 100644
index 0000000000..007dff1356
--- /dev/null
+++ b/frontend/app/aipanel/thinkingmode.tsx
@@ -0,0 +1,119 @@
+// Copyright 2025, Command Line Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+import { atoms } from "@/app/store/global";
+import { useAtomValue } from "jotai";
+import { memo, useRef, useState } from "react";
+import { WaveAIModel } from "./waveai-model";
+
+type ThinkingMode = "quick" | "balanced" | "deep";
+
+interface ThinkingModeMetadata {
+ icon: string;
+ name: string;
+ desc: string;
+ premium: boolean;
+}
+
+const ThinkingModeData: Record<ThinkingMode, ThinkingModeMetadata> = {
+ quick: {
+ icon: "fa-bolt",
+ name: "Quick",
+ desc: "Fastest responses (gpt-5-mini)",
+ premium: false,
+ },
+ balanced: {
+ icon: "fa-sparkles",
+ name: "Balanced",
+ desc: "Good mix of speed and accuracy\n(gpt-5 with minimal thinking)",
+ premium: true,
+ },
+ deep: {
+ icon: "fa-lightbulb",
+ name: "Deep",
+ desc: "Slower but most capable\n(gpt-5 with full reasoning)",
+ premium: true,
+ },
+};
+
+export const ThinkingLevelDropdown = memo(() => {
+ const model = WaveAIModel.getInstance();
+ const thinkingMode = useAtomValue(model.thinkingMode);
+ const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom);
+ const [isOpen, setIsOpen] = useState(false);
+ const dropdownRef = useRef<HTMLDivElement>(null);
+
+ const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
+
+ const handleSelect = (mode: ThinkingMode) => {
+ const metadata = ThinkingModeData[mode];
+ if (!hasPremium && metadata.premium) {
+ return;
+ }
+ model.setThinkingMode(mode);
+ setIsOpen(false);
+ };
+
+ let currentMode = (thinkingMode as ThinkingMode) || "balanced";
+ if (!hasPremium && ThinkingModeData[currentMode].premium) {
+ currentMode = "quick";
+ }
+ const currentMetadata = ThinkingModeData[currentMode];
+
+ return (
+
+
+
+ {isOpen && (
+ <>
+
setIsOpen(false)} />
+
+ {(Object.keys(ThinkingModeData) as ThinkingMode[]).map((mode, index) => {
+ const metadata = ThinkingModeData[mode];
+ const isFirst = index === 0;
+ const isLast = index === Object.keys(ThinkingModeData).length - 1;
+ const isDisabled = !hasPremium && metadata.premium;
+ const isSelected = currentMode === mode;
+ return (
+
+ );
+ })}
+
+ >
+ )}
+
+ );
+});
+
+ThinkingLevelDropdown.displayName = "ThinkingLevelDropdown";
diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx
index 3f101ec618..f980aa8887 100644
--- a/frontend/app/aipanel/waveai-model.tsx
+++ b/frontend/app/aipanel/waveai-model.tsx
@@ -56,6 +56,7 @@ export class WaveAIModel {
widgetAccessAtom!: jotai.Atom
;
droppedFiles: jotai.PrimitiveAtom = jotai.atom([]);
chatId!: jotai.PrimitiveAtom;
+ thinkingMode: jotai.PrimitiveAtom<string> = jotai.atom("balanced");
errorMessage: jotai.PrimitiveAtom = jotai.atom(null) as jotai.PrimitiveAtom;
modelAtom!: jotai.Atom;
containerWidth: jotai.PrimitiveAtom = jotai.atom(0);
@@ -331,6 +332,14 @@ export class WaveAIModel {
});
}
+ setThinkingMode(mode: string) {
+ globalStore.set(this.thinkingMode, mode);
+ RpcApi.SetRTInfoCommand(TabRpcClient, {
+ oref: this.orefContext,
+ data: { "waveai:thinkingmode": mode },
+ });
+ }
+
async loadInitialChat(): Promise {
const rtInfo = await RpcApi.GetRTInfoCommand(TabRpcClient, {
oref: this.orefContext,
@@ -345,6 +354,9 @@ export class WaveAIModel {
}
globalStore.set(this.chatId, chatIdValue);
+ const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "balanced";
+ globalStore.set(this.thinkingMode, thinkingModeValue);
+
try {
const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue });
const messages: UIMessage[] = chatData?.messages ?? [];
diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts
index b6915ef903..a4051e0204 100644
--- a/frontend/types/gotypes.d.ts
+++ b/frontend/types/gotypes.d.ts
@@ -853,7 +853,7 @@ declare global {
"builder:appid"?: string;
"builder:env"?: {[key: string]: string};
"waveai:chatid"?: string;
- "waveai:thinkinglevel"?: string;
+ "waveai:thinkingmode"?: string;
"waveai:maxoutputtokens"?: number;
};
@@ -1145,6 +1145,8 @@ declare global {
"waveai:firstbytems"?: number;
"waveai:requestdurms"?: number;
"waveai:widgetaccess"?: boolean;
+ "waveai:thinkinglevel"?: string;
+ "waveai:thinkingmode"?: string;
"waveai:feedback"?: "good" | "bad";
"waveai:action"?: string;
$set?: TEventUserProps;
diff --git a/pkg/aiusechat/uctypes/usechat-types.go b/pkg/aiusechat/uctypes/usechat-types.go
index 4b6ac97695..8415fd56e6 100644
--- a/pkg/aiusechat/uctypes/usechat-types.go
+++ b/pkg/aiusechat/uctypes/usechat-types.go
@@ -123,6 +123,12 @@ const (
ThinkingLevelHigh = "high"
)
+const (
+ ThinkingModeQuick = "quick"
+ ThinkingModeBalanced = "balanced"
+ ThinkingModeDeep = "deep"
+)
+
const (
ToolUseStatusPending = "pending"
ToolUseStatusError = "error"
@@ -212,6 +218,7 @@ type AIOptsType struct {
MaxTokens int `json:"maxtokens,omitempty"`
TimeoutMs int `json:"timeoutms,omitempty"`
ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh
+ ThinkingMode string `json:"thinkingmode,omitempty"` // quick, balanced, or deep
}
func (opts AIOptsType) IsWaveProxy() bool {
@@ -254,6 +261,8 @@ type AIMetrics struct {
FirstByteLatency int `json:"firstbytelatency"` // ms
RequestDuration int `json:"requestduration"` // ms
WidgetAccess bool `json:"widgetaccess"`
+ ThinkingLevel string `json:"thinkinglevel,omitempty"`
+ ThinkingMode string `json:"thinkingmode,omitempty"`
}
// GenAIMessage interface for messages stored in conversations
diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go
index 70e07a1baa..8ca1acf050 100644
--- a/pkg/aiusechat/usechat.go
+++ b/pkg/aiusechat/usechat.go
@@ -108,33 +108,50 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo
if rtInfo != nil && rtInfo.WaveAIMaxOutputTokens > 0 {
maxTokens = rtInfo.WaveAIMaxOutputTokens
}
+ var thinkingMode string
+ if premium {
+ thinkingMode = uctypes.ThinkingModeBalanced
+ if rtInfo != nil && rtInfo.WaveAIThinkingMode != "" {
+ thinkingMode = rtInfo.WaveAIThinkingMode
+ }
+ } else {
+ thinkingMode = uctypes.ThinkingModeQuick
+ }
if DefaultAPI == APIType_Anthropic {
thinkingLevel := uctypes.ThinkingLevelMedium
- if rtInfo != nil && rtInfo.WaveAIThinkingLevel != "" {
- thinkingLevel = rtInfo.WaveAIThinkingLevel
- }
return &uctypes.AIOptsType{
APIType: APIType_Anthropic,
Model: uctypes.DefaultAnthropicModel,
MaxTokens: maxTokens,
ThinkingLevel: thinkingLevel,
+ ThinkingMode: thinkingMode,
BaseURL: baseUrl,
}, nil
} else if DefaultAPI == APIType_OpenAI {
- model := uctypes.DefaultOpenAIModel
- thinkingLevel := uctypes.ThinkingLevelLow
- if premium {
+ var model string
+ var thinkingLevel string
+
+ switch thinkingMode {
+ case uctypes.ThinkingModeQuick:
+ model = uctypes.DefaultOpenAIModel
+ thinkingLevel = uctypes.ThinkingLevelLow
+ case uctypes.ThinkingModeBalanced:
+ model = uctypes.PremiumOpenAIModel
+ thinkingLevel = uctypes.ThinkingLevelLow
+ case uctypes.ThinkingModeDeep:
model = uctypes.PremiumOpenAIModel
thinkingLevel = uctypes.ThinkingLevelMedium
- if rtInfo != nil && rtInfo.WaveAIThinkingLevel != "" {
- thinkingLevel = rtInfo.WaveAIThinkingLevel
- }
+ default:
+ model = uctypes.PremiumOpenAIModel
+ thinkingLevel = uctypes.ThinkingLevelLow
}
+
return &uctypes.AIOptsType{
APIType: APIType_OpenAI,
Model: model,
MaxTokens: maxTokens,
ThinkingLevel: thinkingLevel,
+ ThinkingMode: thinkingMode,
BaseURL: baseUrl,
}, nil
}
@@ -398,8 +415,10 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, chatOpts uctyp
APIType: chatOpts.Config.APIType,
Model: chatOpts.Config.Model,
},
- WidgetAccess: chatOpts.WidgetAccess,
- ToolDetail: make(map[string]int),
+ WidgetAccess: chatOpts.WidgetAccess,
+ ToolDetail: make(map[string]int),
+ ThinkingLevel: chatOpts.Config.ThinkingLevel,
+ ThinkingMode: chatOpts.Config.ThinkingMode,
}
firstStep := true
var cont *uctypes.WaveContinueResponse
@@ -611,6 +630,8 @@ func sendAIMetricsTelemetry(ctx context.Context, metrics *uctypes.AIMetrics) {
WaveAIFirstByteMs: metrics.FirstByteLatency,
WaveAIRequestDurMs: metrics.RequestDuration,
WaveAIWidgetAccess: metrics.WidgetAccess,
+ WaveAIThinkingLevel: metrics.ThinkingLevel,
+ WaveAIThinkingMode: metrics.ThinkingMode,
})
_ = telemetry.RecordTEvent(ctx, event)
}
diff --git a/pkg/telemetry/telemetrydata/telemetrydata.go b/pkg/telemetry/telemetrydata/telemetrydata.go
index 2e9a372440..282d5e48eb 100644
--- a/pkg/telemetry/telemetrydata/telemetrydata.go
+++ b/pkg/telemetry/telemetrydata/telemetrydata.go
@@ -144,6 +144,8 @@ type TEventProps struct {
WaveAIFirstByteMs int `json:"waveai:firstbytems,omitempty"` // ms
WaveAIRequestDurMs int `json:"waveai:requestdurms,omitempty"` // ms
WaveAIWidgetAccess bool `json:"waveai:widgetaccess,omitempty"`
+ WaveAIThinkingLevel string `json:"waveai:thinkinglevel,omitempty"`
+ WaveAIThinkingMode string `json:"waveai:thinkingmode,omitempty"`
WaveAIFeedback string `json:"waveai:feedback,omitempty" tstype:"\"good\" | \"bad\""`
WaveAIAction string `json:"waveai:action,omitempty"`
diff --git a/pkg/waveobj/objrtinfo.go b/pkg/waveobj/objrtinfo.go
index d484c8f63d..30afe48b07 100644
--- a/pkg/waveobj/objrtinfo.go
+++ b/pkg/waveobj/objrtinfo.go
@@ -23,6 +23,6 @@ type ObjRTInfo struct {
BuilderEnv map[string]string `json:"builder:env,omitempty"`
WaveAIChatId string `json:"waveai:chatid,omitempty"`
- WaveAIThinkingLevel string `json:"waveai:thinkinglevel,omitempty"`
+ WaveAIThinkingMode string `json:"waveai:thinkingmode,omitempty"`
WaveAIMaxOutputTokens int `json:"waveai:maxoutputtokens,omitempty"`
}