Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 21 additions & 14 deletions frontend/app/aipanel/aipanel-contextmenu.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@

import { waveAIHasSelection } from "@/app/aipanel/waveai-focus-utils";
import { ContextMenuModel } from "@/app/store/contextmenu";
import { isDev } from "@/app/store/global";
import { atoms, isDev } from "@/app/store/global";
import { globalStore } from "@/app/store/jotaiStore";
import { RpcApi } from "@/app/store/wshclientapi";
import { TabRpcClient } from "@/app/store/wshrpcutil";
import { WaveAIModel } from "./waveai-model";
Expand Down Expand Up @@ -38,41 +39,47 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo
oref: model.orefContext,
});

const currentThinkingLevel = rtInfo?.["waveai:thinkinglevel"] ?? "medium";
const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom);
const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "balanced" : "quick");
const defaultTokens = model.inBuilder ? 24576 : 4096;
const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens;

const thinkingLevelSubmenu: ContextMenuItem[] = [
const thinkingModeSubmenu: ContextMenuItem[] = [
{
label: "Low",
label: "Quick (gpt-5-mini)",
type: "checkbox",
checked: currentThinkingLevel === "low",
checked: currentThinkingMode === "quick",
click: () => {
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
data: { "waveai:thinkinglevel": "low" },
data: { "waveai:thinkingmode": "quick" },
});
},
},
{
label: "Medium",
label: hasPremium ? "Balanced (gpt-5, low thinking)" : "Balanced (premium)",
type: "checkbox",
checked: currentThinkingLevel === "medium",
checked: currentThinkingMode === "balanced",
enabled: hasPremium,
click: () => {
if (!hasPremium) return;
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
data: { "waveai:thinkinglevel": "medium" },
data: { "waveai:thinkingmode": "balanced" },
});
},
},
{
label: "High",
label: hasPremium ? "Deep (gpt-5, full thinking)" : "Deep (premium)",
type: "checkbox",
checked: currentThinkingLevel === "high",
checked: currentThinkingMode === "deep",
enabled: hasPremium,
click: () => {
if (!hasPremium) return;
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
data: { "waveai:thinkinglevel": "high" },
data: { "waveai:thinkingmode": "deep" },
});
},
},
Expand Down Expand Up @@ -157,8 +164,8 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo
}

menu.push({
label: "Thinking Level",
submenu: thinkingLevelSubmenu,
label: "Thinking Mode",
submenu: thinkingModeSubmenu,
});

menu.push({
Expand Down
6 changes: 5 additions & 1 deletion frontend/app/aipanel/aipanel.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import { AIPanelInput } from "./aipanelinput";
import { AIPanelMessages } from "./aipanelmessages";
import { AIRateLimitStrip } from "./airatelimitstrip";
import { TelemetryRequiredMessage } from "./telemetryrequired";
import { ThinkingLevelDropdown } from "./thinkingmode";
import { WaveAIModel } from "./waveai-model";

const AIBlockMask = memo(() => {
Expand Down Expand Up @@ -489,9 +490,12 @@ const AIPanelComponentInner = memo(() => {
<>
{messages.length === 0 && initialLoadDone ? (
<div
className="flex-1 overflow-y-auto p-2"
className="flex-1 overflow-y-auto p-2 relative"
onContextMenu={(e) => handleWaveAIContextMenu(e, true)}
>
<div className="absolute top-2 right-2 z-10">
<ThinkingLevelDropdown />
</div>
{model.inBuilder ? <AIBuilderWelcomeMessage /> : <AIWelcomeMessage />}
</div>
) : (
Expand Down
10 changes: 9 additions & 1 deletion frontend/app/aipanel/aipanelmessages.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import { useAtomValue } from "jotai";
import { memo, useEffect, useRef } from "react";
import { AIMessage } from "./aimessage";
import { ThinkingLevelDropdown } from "./thinkingmode";
import { WaveAIModel } from "./waveai-model";

interface AIPanelMessagesProps {
Expand Down Expand Up @@ -41,7 +42,14 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane
}, [isPanelOpen]);

return (
<div ref={messagesContainerRef} className="flex-1 overflow-y-auto p-2 space-y-4" onContextMenu={onContextMenu}>
<div
ref={messagesContainerRef}
className="flex-1 overflow-y-auto p-2 space-y-4 relative"
onContextMenu={onContextMenu}
>
<div className="absolute top-2 right-2 z-10">
<ThinkingLevelDropdown />
</div>
{messages.map((message, index) => {
const isLastMessage = index === messages.length - 1;
const isStreaming = status === "streaming" && isLastMessage && message.role === "assistant";
Expand Down
119 changes: 119 additions & 0 deletions frontend/app/aipanel/thinkingmode.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0

import { atoms } from "@/app/store/global";
import { useAtomValue } from "jotai";
import { memo, useRef, useState } from "react";
import { WaveAIModel } from "./waveai-model";

type ThinkingMode = "quick" | "balanced" | "deep";

// UI metadata for one entry in the thinking-mode dropdown.
interface ThinkingModeMetadata {
    icon: string; // font-awesome icon class (used as `fa ${icon}`)
    name: string; // short label shown on the trigger button and menu row
    desc: string; // longer description; may contain "\n" (rendered with white-space: pre-line)
    premium: boolean; // true when the mode requires premium requests
}

// Table of all selectable thinking modes.
// `satisfies` (instead of a `: Record<...>` annotation) still validates that every
// ThinkingMode key is present with the right shape, but preserves the literal key
// set, so `keyof typeof ThinkingModeData` remains "quick" | "balanced" | "deep".
const ThinkingModeData = {
    quick: {
        icon: "fa-bolt",
        name: "Quick",
        desc: "Fastest responses (gpt-5-mini)",
        premium: false,
    },
    balanced: {
        icon: "fa-sparkles",
        name: "Balanced",
        desc: "Good mix of speed and accuracy\n(gpt-5 with minimal thinking)",
        premium: true,
    },
    deep: {
        icon: "fa-lightbulb",
        name: "Deep",
        desc: "Slower but most capable\n(gpt-5 with full reasoning)",
        premium: true,
    },
} satisfies Record<ThinkingMode, ThinkingModeMetadata>;

export const ThinkingLevelDropdown = memo(() => {
const model = WaveAIModel.getInstance();
const thinkingMode = useAtomValue(model.thinkingMode);
const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom);
const [isOpen, setIsOpen] = useState(false);
const dropdownRef = useRef<HTMLDivElement>(null);

const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;

const handleSelect = (mode: ThinkingMode) => {
const metadata = ThinkingModeData[mode];
if (!hasPremium && metadata.premium) {
return;
}
model.setThinkingMode(mode);
setIsOpen(false);
};

let currentMode = (thinkingMode as ThinkingMode) || "balanced";
const currentMetadata = ThinkingModeData[currentMode];
if (!hasPremium && currentMetadata.premium) {
currentMode = "quick";
}
Comment on lines +57 to +61
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

State divergence and validation issues remain unresolved.

The concerns raised in previous reviews at lines 57-62 persist:

  1. State divergence: When a non-premium user has a premium mode stored in thinkingMode, currentMode is reassigned to "quick" (line 60), but the atom is never updated via model.setThinkingMode("quick"). The dropdown menu correctly uses currentMode for the selection indicator (line 84), but the underlying atom still holds the premium value, causing inconsistency if other code reads from the atom.

  2. Type-safety gap: The cast (thinkingMode as ThinkingMode) on line 57 assumes the atom contains a valid mode. If the atom holds corrupted or stale data (e.g., a deprecated mode name), line 58 will access undefined from ThinkingModeData[currentMode], causing a runtime error at line 59 when accessing currentMetadata.premium.

  3. Inconsistent fallback: The default "balanced" on line 57 contradicts the PR objective to default non-premium users to "quick". Since "balanced" is premium, the immediate reassignment to "quick" at line 60 masks this issue but adds unnecessary logic.

Recommended fix:

-    let currentMode = (thinkingMode as ThinkingMode) || "balanced";
+    const validModes: ThinkingMode[] = ["quick", "balanced", "deep"];
+    let currentMode = validModes.includes(thinkingMode as ThinkingMode) 
+        ? (thinkingMode as ThinkingMode) 
+        : "quick";
     const currentMetadata = ThinkingModeData[currentMode];
     if (!hasPremium && currentMetadata.premium) {
         currentMode = "quick";
+        // Sync atom to match UI if downgraded
+        if (thinkingMode !== "quick") {
+            queueMicrotask(() => model.setThinkingMode("quick"));
+        }
     }

Note: Using queueMicrotask (or useEffect with [hasPremium, thinkingMode] dependencies) ensures the state update happens after render, avoiding side effects during render which React 19 strict mode double-invokes.

🤖 Prompt for AI Agents
In frontend/app/aipanel/thinkingmode.tsx around lines 57 to 61, the code assigns
currentMode from the atom and silently reassigns it to "quick" for non-premium
users without updating the atom, risks runtime errors by casting without
validation, and uses a contradictory default "balanced"; fix by validating the
atom value against ThinkingModeData and falling back to a non-premium-safe
default ("quick" if !hasPremium, otherwise a known valid default), and when you
change the mode for a non-premium user update the atom via
model.setThinkingMode(...) outside of render (use queueMicrotask or a useEffect
with [hasPremium, thinkingMode] dependencies) so the UI and stored state stay in
sync and you avoid accessing undefined metadata.


Comment on lines +57 to +62
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Inconsistent UI state when premium mode is restricted.

Three issues:

  1. State divergence: When a non-premium user has a premium mode stored in thinkingMode, currentMode is reassigned to "quick" for display (line 60), but the underlying atom is never updated. The button shows "Quick" but line 84 computes isSelected from the original thinkingMode, so the dropdown menu highlights the premium mode as selected. This creates a confusing UX.

  2. Type-safety gap: The cast (thinkingMode as ThinkingMode) assumes the atom contains a valid mode, but corrupted or stale data could cause a runtime error when accessing ThinkingModeData[currentMode].

  3. Inconsistent fallback: Falling back to "balanced" contradicts the PR objective to default non-premium users to "quick".

Recommended fix:

-    let currentMode = (thinkingMode as ThinkingMode) || "balanced";
+    const validModes: ThinkingMode[] = ["quick", "balanced", "deep"];
+    let currentMode = validModes.includes(thinkingMode as ThinkingMode) 
+        ? (thinkingMode as ThinkingMode) 
+        : "quick";
     const currentMetadata = ThinkingModeData[currentMode];
     if (!hasPremium && currentMetadata.premium) {
         currentMode = "quick";
+        // Persist the downgrade if needed
+        if (thinkingMode !== "quick") {
+            model.setThinkingMode("quick");
+        }
     }

Note: Re-introducing a setter during render requires guarding against redundant calls (as shown above) to avoid duplicate RPCs in React 19 strict mode. Alternatively, use a useEffect with [hasPremium, thinkingMode] dependencies to handle the downgrade asynchronously and keep render pure.

return (
<div className="relative" ref={dropdownRef}>
<button
onClick={() => setIsOpen(!isOpen)}
className="flex items-center gap-1.5 px-2 py-1 text-xs text-gray-300 hover:text-white bg-gray-800/50 hover:bg-gray-700/50 rounded transition-colors cursor-pointer border border-gray-600/50"
title={`Thinking: ${currentMetadata.name}`}
>
<i className={`fa ${currentMetadata.icon} text-[10px]`}></i>
<span className="text-[11px]">{currentMetadata.name}</span>
<i className="fa fa-chevron-down text-[8px]"></i>
</button>

{isOpen && (
<>
<div className="fixed inset-0 z-40" onClick={() => setIsOpen(false)} />
<div className="absolute top-full right-0 mt-1 bg-gray-800 border border-gray-600 rounded shadow-lg z-50 min-w-[280px]">
{(Object.keys(ThinkingModeData) as ThinkingMode[]).map((mode, index) => {
const metadata = ThinkingModeData[mode];
const isFirst = index === 0;
const isLast = index === Object.keys(ThinkingModeData).length - 1;
const isDisabled = !hasPremium && metadata.premium;
const isSelected = currentMode === mode;
return (
<button
key={mode}
onClick={() => handleSelect(mode)}
disabled={isDisabled}
className={`w-full flex flex-col gap-0.5 px-3 ${
isFirst ? "pt-1 pb-0.5" : isLast ? "pt-0.5 pb-1" : "pt-0.5 pb-0.5"
} ${
isDisabled
? "text-gray-500 cursor-not-allowed"
: "text-gray-300 hover:bg-gray-700 cursor-pointer"
} transition-colors text-left`}
>
<div className="flex items-center gap-2 w-full">
<i className={`fa ${metadata.icon}`}></i>
<span className={`text-sm ${isSelected ? "font-bold" : ""}`}>
{metadata.name}
{isDisabled && " (premium)"}
</span>
{isSelected && <i className="fa fa-check ml-auto"></i>}
</div>
<div className="text-xs text-muted pl-5" style={{ whiteSpace: "pre-line" }}>
{metadata.desc}
</div>
</button>
);
})}
</div>
</>
)}
</div>
);
});

ThinkingLevelDropdown.displayName = "ThinkingLevelDropdown";
12 changes: 12 additions & 0 deletions frontend/app/aipanel/waveai-model.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ export class WaveAIModel {
widgetAccessAtom!: jotai.Atom<boolean>;
droppedFiles: jotai.PrimitiveAtom<DroppedFile[]> = jotai.atom([]);
chatId!: jotai.PrimitiveAtom<string>;
thinkingMode: jotai.PrimitiveAtom<string> = jotai.atom("balanced");
errorMessage: jotai.PrimitiveAtom<string> = jotai.atom(null) as jotai.PrimitiveAtom<string>;
modelAtom!: jotai.Atom<string>;
containerWidth: jotai.PrimitiveAtom<number> = jotai.atom(0);
Expand Down Expand Up @@ -331,6 +332,14 @@ export class WaveAIModel {
});
}

// Sets the AI thinking mode: updates the local jotai atom immediately
// (optimistic UI) and persists it to the block's runtime info under
// "waveai:thinkingmode" via a fire-and-forget RPC.
// NOTE(review): `mode` is typed as plain string -- presumably one of
// "quick" | "balanced" | "deep"; confirm callers only pass valid values.
setThinkingMode(mode: string) {
globalStore.set(this.thinkingMode, mode);
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: this.orefContext,
data: { "waveai:thinkingmode": mode },
});
}

async loadInitialChat(): Promise<WaveUIMessage[]> {
const rtInfo = await RpcApi.GetRTInfoCommand(TabRpcClient, {
oref: this.orefContext,
Expand All @@ -345,6 +354,9 @@ export class WaveAIModel {
}
globalStore.set(this.chatId, chatIdValue);

const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "balanced";
globalStore.set(this.thinkingMode, thinkingModeValue);

try {
const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue });
const messages: UIMessage[] = chatData?.messages ?? [];
Expand Down
4 changes: 3 additions & 1 deletion frontend/types/gotypes.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -853,7 +853,7 @@ declare global {
"builder:appid"?: string;
"builder:env"?: {[key: string]: string};
"waveai:chatid"?: string;
"waveai:thinkinglevel"?: string;
"waveai:thinkingmode"?: string;
"waveai:maxoutputtokens"?: number;
};

Expand Down Expand Up @@ -1145,6 +1145,8 @@ declare global {
"waveai:firstbytems"?: number;
"waveai:requestdurms"?: number;
"waveai:widgetaccess"?: boolean;
"waveai:thinkinglevel"?: string;
"waveai:thinkingmode"?: string;
"waveai:feedback"?: "good" | "bad";
"waveai:action"?: string;
$set?: TEventUserProps;
Expand Down
9 changes: 9 additions & 0 deletions pkg/aiusechat/uctypes/usechat-types.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,12 @@ const (
ThinkingLevelHigh = "high"
)

// Thinking modes selectable in the Wave AI panel; the frontend persists the
// chosen value in block runtime info under the "waveai:thinkingmode" key.
const (
ThinkingModeQuick = "quick"
ThinkingModeBalanced = "balanced"
ThinkingModeDeep = "deep"
)

const (
ToolUseStatusPending = "pending"
ToolUseStatusError = "error"
Expand Down Expand Up @@ -212,6 +218,7 @@ type AIOptsType struct {
MaxTokens int `json:"maxtokens,omitempty"`
TimeoutMs int `json:"timeoutms,omitempty"`
ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh
ThinkingMode string `json:"thinkingmode,omitempty"` // quick, balanced, or deep
}

func (opts AIOptsType) IsWaveProxy() bool {
Expand Down Expand Up @@ -254,6 +261,8 @@ type AIMetrics struct {
FirstByteLatency int `json:"firstbytelatency"` // ms
RequestDuration int `json:"requestduration"` // ms
WidgetAccess bool `json:"widgetaccess"`
ThinkingLevel string `json:"thinkinglevel,omitempty"`
ThinkingMode string `json:"thinkingmode,omitempty"`
}

// GenAIMessage interface for messages stored in conversations
Expand Down
Loading
Loading