diff --git a/Taskfile.yml b/Taskfile.yml index 6d329f3bf8..0cf2568bba 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -27,6 +27,7 @@ tasks: - build:tsunamiscaffold env: WAVETERM_ENVFILE: "{{.ROOT_DIR}}/.env" + WCLOUD_PING_ENDPOINT: "https://ping-dev.waveterm.dev/central" WCLOUD_ENDPOINT: "https://api-dev.waveterm.dev/central" WCLOUD_WS_ENDPOINT: "wss://wsapi-dev.waveterm.dev" @@ -40,6 +41,7 @@ tasks: - build:backend env: WAVETERM_ENVFILE: "{{.ROOT_DIR}}/.env" + WCLOUD_PING_ENDPOINT: "https://ping-dev.waveterm.dev/central" WCLOUD_ENDPOINT: "https://api-dev.waveterm.dev/central" WCLOUD_WS_ENDPOINT: "wss://wsapi-dev.waveterm.dev" @@ -51,6 +53,7 @@ tasks: - build:backend:quickdev env: WAVETERM_ENVFILE: "{{.ROOT_DIR}}/.env" + WCLOUD_PING_ENDPOINT: "https://ping-dev.waveterm.dev/central" WCLOUD_ENDPOINT: "https://api-dev.waveterm.dev/central" WCLOUD_WS_ENDPOINT: "wss://wsapi-dev.waveterm.dev/" @@ -62,6 +65,7 @@ tasks: - build:backend:quickdev:windows env: WAVETERM_ENVFILE: "{{.ROOT_DIR}}/.env" + WCLOUD_PING_ENDPOINT: "https://ping-dev.waveterm.dev/central" WCLOUD_ENDPOINT: "https://api-dev.waveterm.dev/central" WCLOUD_WS_ENDPOINT: "wss://wsapi-dev.waveterm.dev/" diff --git a/cmd/server/main-server.go b/cmd/server/main-server.go index 5cac190894..c4c2c14649 100644 --- a/cmd/server/main-server.go +++ b/cmd/server/main-server.go @@ -38,6 +38,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/web" "github.com/wavetermdev/waveterm/pkg/wps" "github.com/wavetermdev/waveterm/pkg/wshrpc" + "github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient" "github.com/wavetermdev/waveterm/pkg/wshrpc/wshremote" "github.com/wavetermdev/waveterm/pkg/wshrpc/wshserver" "github.com/wavetermdev/waveterm/pkg/wshutil" @@ -59,6 +60,8 @@ const TelemetryInitialCountsWait = 5 * time.Second const TelemetryCountsInterval = 1 * time.Hour const BackupCleanupTick = 2 * time.Minute const BackupCleanupInterval = 4 * time.Hour +const InitialDiagnosticWait = 5 * time.Minute +const DiagnosticTick = 10 * time.Minute 
var shutdownOnce sync.Once @@ -128,23 +131,46 @@ func telemetryLoop() { } } -func sendNoTelemetryUpdate(telemetryEnabled bool) { +func diagnosticLoop() { + defer func() { + panichandler.PanicHandler("diagnosticLoop", recover()) + }() + if os.Getenv("WAVETERM_NOPING") != "" { + log.Printf("WAVETERM_NOPING set, disabling diagnostic ping\n") + return + } + var lastSentDate string + time.Sleep(InitialDiagnosticWait) + for { + currentDate := time.Now().Format("2006-01-02") + if lastSentDate == "" || lastSentDate != currentDate { + if sendDiagnosticPing() { + lastSentDate = currentDate + } + } + time.Sleep(DiagnosticTick) + } +} + +func sendDiagnosticPing() bool { ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) defer cancelFn() + + rpcClient := wshclient.GetBareRpcClient() + isOnline, err := wshclient.NetworkOnlineCommand(rpcClient, &wshrpc.RpcOpts{Route: "electron", Timeout: 2000}) + if err != nil || !isOnline { + return false + } clientData, err := wstore.DBGetSingleton[*waveobj.Client](ctx) if err != nil { - log.Printf("telemetry update: error getting client data: %v\n", err) - return + return false } if clientData == nil { - log.Printf("telemetry update: client data is nil\n") - return - } - err = wcloud.SendNoTelemetryUpdate(ctx, clientData.OID, !telemetryEnabled) - if err != nil { - log.Printf("[error] sending no-telemetry update: %v\n", err) - return + return false } + usageTelemetry := telemetry.IsTelemetryEnabled() + wcloud.SendDiagnosticPing(ctx, clientData.OID, usageTelemetry) + return true } func setupTelemetryConfigHandler() { @@ -159,7 +185,7 @@ func setupTelemetryConfigHandler() { newTelemetryEnabled := newConfig.Settings.TelemetryEnabled if newTelemetryEnabled != currentTelemetryEnabled { currentTelemetryEnabled = newTelemetryEnabled - go sendNoTelemetryUpdate(newTelemetryEnabled) + wcore.GoSendNoTelemetryUpdate(newTelemetryEnabled) } }) } @@ -318,8 +344,8 @@ func startupActivityUpdate(firstLaunch bool) { fullConfig := 
wconfig.GetWatcher().GetFullConfig() props := telemetrydata.TEventProps{ UserSet: &telemetrydata.TEventUserProps{ - ClientVersion: "v" + WaveVersion, - ClientBuildTime: BuildTime, + ClientVersion: "v" + wavebase.WaveVersion, + ClientBuildTime: wavebase.BuildTime, ClientArch: wavebase.ClientArch(), ClientOSRelease: wavebase.UnameKernelRelease(), ClientIsDev: wavebase.IsDevMode(), @@ -533,6 +559,7 @@ func main() { maybeStartPprofServer() go stdinReadWatch() go telemetryLoop() + go diagnosticLoop() setupTelemetryConfigHandler() go updateTelemetryCountsLoop() go backupCleanupLoop() diff --git a/docs/docs/faq.mdx b/docs/docs/faq.mdx index 37c714e610..fe3a2124be 100644 --- a/docs/docs/faq.mdx +++ b/docs/docs/faq.mdx @@ -4,6 +4,8 @@ id: "faq" title: "FAQ" --- +import { VersionBadge } from "@site/src/components/versionbadge"; + # FAQ ### How can I see the block numbers? @@ -52,3 +54,15 @@ If you've installed via Snap, you can use the following command: ```sh sudo snap install waveterm --classic --beta ``` + +## Can I use Wave AI without enabling telemetry? + + + +Yes! Wave AI is normally disabled when telemetry is not enabled. However, you can enable Wave AI features without telemetry by configuring your own custom AI model (either a local model or using your own API key). + +To enable Wave AI without telemetry: +1. Configure a custom AI mode (see [Wave AI documentation](./waveai-modes)) +2. Set `waveai:defaultmode` to your custom mode's key in your Wave settings + +Once you've completed both steps, Wave AI will be enabled and you can use it completely privately without telemetry. This allows you to use local models like Ollama or your own API keys with providers like OpenAI, OpenRouter, or others. 
diff --git a/docs/docs/telemetry.mdx b/docs/docs/telemetry.mdx index 958e6e3538..2f9132276d 100644 --- a/docs/docs/telemetry.mdx +++ b/docs/docs/telemetry.mdx @@ -8,152 +8,63 @@ id: "telemetry" Wave Terminal collects telemetry data to help us track feature use, direct future product efforts, and generate aggregate metrics on Wave's popularity and usage. We do NOT collect personal information (PII), keystrokes, file contents, AI prompts, IP addresses, hostnames, or commands. We attach all information to an anonymous, randomly generated _ClientId_ (UUID). You may opt out of collection at any time. -Here’s a quick summary of what is collected: +Here's a quick summary of what is collected: -- Basic App/System Info – OS, architecture, app version, update settings -- Usage Metrics – App start/shutdown, active minutes, foreground time, tab/block counts/usage -- Feature Interactions – When you create tabs, run commands, change settings, etc. -- Display Info – Monitor resolution, number of displays -- Connection Events – SSH/WSL connection attempts (but NOT hostnames/IPs) -- AI Commands – Only which AI backend is used (e.g., OpenAI, Claude) – no text or prompts sent -- Error Reports – Crash/panic events with minimal debugging info, but no stack traces or detailed errors +- Basic App/System Info - OS, architecture, app version, update settings +- Usage Metrics - App start/shutdown, active minutes, foreground time, tab/block counts/usage +- Feature Interactions - When you create tabs, run commands, change settings, etc. +- Display Info - Monitor resolution, number of displays +- Connection Events - SSH/WSL connection attempts (but NOT hostnames/IPs) +- Wave AI Usage - Model/provider selection, token counts, request metrics, latency (but NOT prompts or responses) +- Error Reports - Crash/panic events with minimal debugging info, but no stack traces or detailed errors Telemetry can be disabled at any time in settings. 
If not disabled it is sent on startup, on shutdown, and every 4-hours. ## How to Disable Telemetry -If you would like to turn telemetry on or off, the first opportunity is a button on the initial welcome page. After this, it can be turned off by adding `"telemetry:enabled": false` to the `config/settings.json` file. It can alternatively be turned on by adding `"telemetry:enabled": true` to the `config/settings.json` file. +Telemetry can be enabled or disabled on the initial welcome screen when Wave first starts. After setup, telemetry can be disabled by setting the `telemetry:enabled` key to `false` in Wave's general configuration file. It can also be disabled using the CLI command `wsh setconfig telemetry:enabled=false`. -:::tip - -You can also change your telemetry setting (true/false) by running the wsh command: +:::info -``` -wsh setconfig telemetry:enabled=true -``` +This document outlines the current telemetry system as of v0.11.1. As of v0.12.5, Wave Terminal no longer sends legacy telemetry. The previous telemetry documentation can be found in our [Legacy Telemetry Documentation](./telemetry-old.mdx) for historical reference. ::: -:::info +## Diagnostics Ping -This document outlines the new telemetry system as of v0.11.1. The previous telemetry documentation is still relevant and can be found in our [Legacy Telemetry Documentation](./telemetry-old.mdx), but in general, the new telemetry is a superset of the old. -::: +Wave sends a small, anonymous diagnostics ping after the app has been running for a short time and at most once per day thereafter. This is used to estimate active installs and understand which versions are still in use, so we can make informed decisions about ongoing support and deprecations. -::: +The ping includes only: your Wave version, OS/CPU arch, local date (yyyy-mm-dd, no timezone or clock time), your randomly generated anonymous client ID, and whether usage telemetry is enabled or disabled. 
+ +It does not include usage data, commands, files, or any telemetry events. + +This ping is intentionally separate from telemetry so Wave can count active installs. If you'd like to disable it, set the WAVETERM_NOPING environment variable. ## Sending Telemetry -Provided that telemetry is enabled, it is sent 10 seconds after Waveterm is first booted and then again every 4 hours thereafter. It can also be sent in response to a few special cases listed below. When telemetry is sent, it is grouped into individual days as determined by your time zone. Any data from a previous day is marked as `Uploaded` so it will not need to be sent again. +Provided that telemetry is enabled, it is sent shortly after Wave is first launched and then again every 4 hours thereafter. It can also be sent in response to a few special cases listed below. When telemetry is sent, events are marked as sent to prevent duplicate transmissions. ### Sending Once Telemetry is Enabled As soon as telemetry is enabled, a telemetry update is sent regardless of how long it has been since the last send. This does not reset the usual timer for telemetry sends. -### Notifying that Telemetry is Disabled - -As soon as telemetry is disabled, Waveterm sends a special update that notifies us of this change. See [When Telemetry is Turned Off](#when-telemetry-is-turned-off) for more info. The timer still runs in the background but no data is sent. - -### When Waveterm is Closed +### When Wave is Closed Provided that telemetry is enabled, it will be sent when Waveterm is closed. -## Event Types - -Below is a list of the event types collected in the new telemetry system. More events are likely to be added in the future. - -| Event Name | Description | -| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `app:startup` | Logged every time you start the app. 
Contains basic app information like architecture, version, buildtime, etc. | -| `app:shutdown` | Logged on every shutdown | -| `app:activity` | Logged once per hour of app activity | -| `app:display` | Logged on startup and contains information about size of displays | -| `app:counts` | Logged once per hour when app is active, contains basic counts like number of windows, tabs, workspaces, blocks, number of settings customizations, etc. | -| `action:magnify` | Logged each time a block is magnified | -| `action:settabtheme` | Logged each time a tab theme is changed | -| `action:runaicmd` | Logged each time an AI request is made (no prompt information or text is sent), only sends "ai:backendtype" to know what type of AI backend is being used (OpenAI, Claude, Gemini, etc.) | -| `action:createtab` | Logged when a new tab is created | -| `action:createblock` | Logged when a new block is created (contains the block view type) | -| `wsh:run` | Logged when a wsh command is executed (contains the command type) | -| `debug:panic` | Logged when a backend (Go) panic happens. Contains a debugging string that can be used to find which panic was hit in our source code. No data is sent | -| `conn:connect` | Logged each time a backend ssh/wsl connection connects (logs the conneciton type, no hostname or IP is sent) | -| `conn:connecterror` | Logged when you try to connect but it fails (logs the connection type, no hostname or IP is set, and no detailed error information is sent) | -| `waveai:post` | Logged after AI request completion with usage metrics (tokens, request counts, latency, etc. - no prompts or responses) | - -## Event Properties - -Each event may contain the following properties that are relevant to the particular events. 
- -| Property | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------ | -| `client:arch` | Wave architecture (darwin, windows, linux) and x64 vs arm64 | -| `client:version` | The Wave version (e.g. v0.11.1) | -| `client:initial_version` | Initial installed wave version | -| `client:buildtime` | The buildtime (more exact wave version) | -| `client:osrelease` | A string representing the version of the OS you're running -- different for darwin, windows, and linux | -| `client:isdev` | True/False if using the dev build | -| `autoupdate:channel` | What auto-update channel you're on (latest vs beta) | -| `autoupdate:enabled` | True/False if auto-updated is enabled | -| `loc:countrycode` | Two character country code (e.g. US, CN, FR, JP) | -| `loc:regioncode` | Two character region code (usually the State or Province within a country) | -| `activity:activeminutes` | For app:activity, a number between 0-60 of how many minutes were active within the hour | -| `activity:fgminutes` | For app:activity, a number between 0-60 of how many minutes Wave was the foreground application | -| `activity:openminutes` | For app:activity, a number between 0-60 of how many minutes Wave was open | -| `action:initiator` | For certain actions logs if the action was initiated by the UI or the backend | -| `debug:panictype` | The string that identifies the panic location within our Go code | -| `block:view` | Type of block, e.g. "preview", "waveai", "term", "sysinfo", etc. | -| `ai:backendtype` | AI backend type (e.g. OpenAI, Gemini, Anthropic, etc.) | -| `wsh:cmd` | The wsh command that was run, e.g. "view", "edit", "run", "editconfig" etc. 
| -| `wsh:haderror` | True/False whether the wsh command returned an error | -| `conn:conntype` | Type of connnection (ssh / wsl) | -| `display:height` | Height of the main display in px | -| `display:width` | Width of the main display in px | -| `display:dpr` | DPR of the main display | -| `display:count` | How many total displays | -| `display:all` | JSON for all the displays attached (same attributes as above) | -| `count:blocks` | Total number of blocks | -| `count:tabs` | Total number of tabs | -| `count:windows` | Total number of windows | -| `count:workspaces` | Total number of workspaces | -| `count:sshconn` | Total number of SSH connections | -| `count:wslconn` | Total number of WSL connections | -| `count:views` | Counts of the types of blocks (views) | -| `waveai:apitype` | AI API provider (OpenAI, Anthropic, etc.) | -| `waveai:model` | AI model name | -| `waveai:inputtokens` | Number of input tokens used | -| `waveai:outputtokens` | Number of output tokens generated | -| `waveai:requestcount` | Number of requests in conversation | -| `waveai:toolusecount` | Number of tool uses | -| `waveai:tooluseerrorcount` | Number of tool use errors | -| `waveai:tooldetail` | Map of tool names to usage counts | -| `waveai:premiumreq` | Number of premium API requests | -| `waveai:proxyreq` | Number of proxy requests | -| `waveai:haderror` | True/False if request had errors | -| `waveai:imagecount` | Number of images in context | -| `waveai:pdfcount` | Number of PDFs in context | -| `waveai:textdoccount` | Number of text documents in context | -| `waveai:textlen` | Total text length in context | -| `waveai:firstbytems` | Latency to first byte in milliseconds | -| `waveai:requestdurms` | Total request duration in milliseconds | -| `waveai:widgetaccess` | True/False if accessed via widget | +## Event Types and Properties ---- - -## When Telemetry is Turned Off - -When a user disables telemetry, Waveterm sends a notification that their anonymous _ClientId_ has had its 
telemetry disabled. This is done with the `wcloud.NoTelemetryInputType` type in the source code. Beyond that, no further information is sent unless telemetry is turned on again. If it is turned on again, the previous 30 days of telemetry will be sent. - ---- - -## A Note on IP Addresses +Wave collects the event types and properties described in the summary above. As we add features, new events and properties may be added to track their usage. -Telemetry is uploaded via https, which means your IP address is known to the telemetry server. We **do not** store your IP address in our telemetry table and **do not** associate it with your _ClientId_. +For the complete, current list of all telemetry events and properties, see the source code: [telemetrydata.go](https://github.com/wavetermdev/waveterm/blob/main/pkg/telemetry/telemetrydata/telemetrydata.go) ---- +## GDPR Opt-Out Compliance -## Previously Collected Telemetry Data +When telemetry is disabled, Wave sends a single minimal opt-out record associated with the anonymous client ID, recording that telemetry was turned off and when it occurred. This record is retained for compliance purposes. After that, no telemetry or usage data is sent. -While we believe the data we collect with telemetry is fairly minimal, we cannot make that decision for every user. If you ever change your mind about what has been collected previously, you may request that your data be deleted by emailing us at [support@waveterm.dev](mailto:support@waveterm.dev). If you do, we will need your _ClientId_ to remove it. +## Deleting Your Data ---- +If you want your previously collected telemetry data deleted, email us at support (at) waveterm.dev with your _ClientId_ and we'll remove it. 
## Privacy Policy diff --git a/docs/docs/waveai-modes.mdx b/docs/docs/waveai-modes.mdx index d8b94ee460..437a6ba99d 100644 --- a/docs/docs/waveai-modes.mdx +++ b/docs/docs/waveai-modes.mdx @@ -74,6 +74,10 @@ wsh setconfig waveai:defaultmode="ollama-llama" This will make the specified mode the default selection when opening Wave AI features. +:::note +Wave AI normally requires telemetry to be enabled. However, if you configure your own custom model (local or BYOK) and set `waveai:defaultmode` to that custom mode's key, you will not receive telemetry requirement messages. This allows you to use Wave AI features completely privately with your own models. +::: + ### Hiding Wave Cloud Modes If you prefer to use only your local or custom models and want to hide Wave's cloud AI modes from the mode dropdown, set `waveai:showcloudmodes` to `false`: diff --git a/emain/emain-wsh.ts b/emain/emain-wsh.ts index 639f4dfd35..24f8f5bb15 100644 --- a/emain/emain-wsh.ts +++ b/emain/emain-wsh.ts @@ -4,7 +4,7 @@ import { WindowService } from "@/app/store/services"; import { RpcResponseHelper, WshClient } from "@/app/store/wshclient"; import { RpcApi } from "@/app/store/wshclientapi"; -import { Notification, safeStorage } from "electron"; +import { Notification, net, safeStorage } from "electron"; import { getResolvedUpdateChannel } from "emain/updater"; import { unamePlatform } from "./emain-platform"; import { getWebContentsByBlockId, webGetSelector } from "./emain-web"; @@ -102,6 +102,10 @@ export class ElectronWshClientType extends WshClient { }; } + async handle_networkonline(rh: RpcResponseHelper): Promise { + return net.isOnline(); + } + // async handle_workspaceupdate(rh: RpcResponseHelper) { // console.log("workspaceupdate"); // fireAndForget(async () => { diff --git a/frontend/app/aipanel/ai-utils.ts b/frontend/app/aipanel/ai-utils.ts index dd725571d6..8bfd67bdc0 100644 --- a/frontend/app/aipanel/ai-utils.ts +++ b/frontend/app/aipanel/ai-utils.ts @@ -547,7 +547,8 @@ export 
const getFilteredAIModeConfigs = ( aiModeConfigs: Record, showCloudModes: boolean, inBuilder: boolean, - hasPremium: boolean + hasPremium: boolean, + currentMode?: string ): FilteredAIModeConfigs => { const hideQuick = inBuilder && hasPremium; @@ -560,7 +561,8 @@ export const getFilteredAIModeConfigs = ( .sort(sortByDisplayOrder); const hasCustomModels = otherProviderConfigs.length > 0; - const shouldShowCloudModes = showCloudModes || !hasCustomModels; + const isCurrentModeCloud = currentMode?.startsWith("waveai@") ?? false; + const shouldShowCloudModes = showCloudModes || !hasCustomModels || isCurrentModeCloud; const waveProviderConfigs = shouldShowCloudModes ? allConfigs.filter((config) => config["ai:provider"] === "wave").sort(sortByDisplayOrder) diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx index 4c6c52a7a4..9848c2327d 100644 --- a/frontend/app/aipanel/aimode.tsx +++ b/frontend/app/aipanel/aimode.tsx @@ -58,6 +58,7 @@ interface ConfigSection { sectionName: string; configs: AIModeConfigWithMode[]; isIncompatible?: boolean; + noTelemetry?: boolean; } function computeCompatibleSections( @@ -111,12 +112,17 @@ function computeCompatibleSections( function computeWaveCloudSections( waveProviderConfigs: AIModeConfigWithMode[], - otherProviderConfigs: AIModeConfigWithMode[] + otherProviderConfigs: AIModeConfigWithMode[], + telemetryEnabled: boolean ): ConfigSection[] { const sections: ConfigSection[] = []; if (waveProviderConfigs.length > 0) { - sections.push({ sectionName: "Wave AI Cloud", configs: waveProviderConfigs }); + sections.push({ + sectionName: "Wave AI Cloud", + configs: waveProviderConfigs, + noTelemetry: !telemetryEnabled, + }); } if (otherProviderConfigs.length > 0) { sections.push({ sectionName: "Custom", configs: otherProviderConfigs }); @@ -131,39 +137,27 @@ interface AIModeDropdownProps { export const AIModeDropdown = memo(({ compatibilityMode = false }: AIModeDropdownProps) => { const model = 
WaveAIModel.getInstance(); - const aiMode = useAtomValue(model.currentAIMode); + const currentMode = useAtomValue(model.currentAIMode); const aiModeConfigs = useAtomValue(model.aiModeConfigs); const waveaiModeConfigs = useAtomValue(atoms.waveaiModeConfigAtom); const widgetContextEnabled = useAtomValue(model.widgetAccessAtom); - const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom); + const hasPremium = useAtomValue(model.hasPremiumAtom); const showCloudModes = useAtomValue(getSettingsKeyAtom("waveai:showcloudmodes")); - const defaultMode = useAtomValue(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced"; + const telemetryEnabled = useAtomValue(getSettingsKeyAtom("telemetry:enabled")) ?? false; const [isOpen, setIsOpen] = useState(false); const dropdownRef = useRef(null); - const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; - const { waveProviderConfigs, otherProviderConfigs } = getFilteredAIModeConfigs( aiModeConfigs, showCloudModes, model.inBuilder, - hasPremium + hasPremium, + currentMode ); - let currentMode = aiMode || defaultMode; - const currentConfig = aiModeConfigs[currentMode]; - if (currentConfig) { - if (!hasPremium && currentConfig["waveai:premium"]) { - currentMode = "waveai@quick"; - } - if (model.inBuilder && hasPremium && currentMode === "waveai@quick") { - currentMode = "waveai@balanced"; - } - } - const sections: ConfigSection[] = compatibilityMode ? computeCompatibleSections(currentMode, aiModeConfigs, waveProviderConfigs, otherProviderConfigs) - : computeWaveCloudSections(waveProviderConfigs, otherProviderConfigs); + : computeWaveCloudSections(waveProviderConfigs, otherProviderConfigs, telemetryEnabled); const showSectionHeaders = compatibilityMode || sections.length > 1; @@ -178,12 +172,17 @@ export const AIModeDropdown = memo(({ compatibilityMode = false }: AIModeDropdow }; const displayConfig = aiModeConfigs[currentMode]; - const displayName = displayConfig ? 
getModeDisplayName(displayConfig) : "Unknown"; - const displayIcon = displayConfig?.["display:icon"] || "sparkles"; + const displayName = displayConfig ? getModeDisplayName(displayConfig) : `Invalid (${currentMode})`; + const displayIcon = displayConfig ? displayConfig["display:icon"] || "sparkles" : "question"; const resolvedConfig = waveaiModeConfigs[currentMode]; const hasToolsSupport = resolvedConfig && resolvedConfig["ai:capabilities"]?.includes("tools"); const showNoToolsWarning = widgetContextEnabled && resolvedConfig && !hasToolsSupport; + const handleNewChatClick = () => { + model.clearChat(); + setIsOpen(false); + }; + const handleConfigureClick = () => { fireAndForget(async () => { RpcApi.RecordTEventCommand( @@ -201,6 +200,15 @@ export const AIModeDropdown = memo(({ compatibilityMode = false }: AIModeDropdow }); }; + const handleEnableTelemetry = () => { + fireAndForget(async () => { + await RpcApi.WaveAIEnableTelemetryCommand(TabRpcClient); + setTimeout(() => { + model.focusInput(); + }, 100); + }); + }; + return (
+ )} )} {section.configs.map((config, index) => { @@ -267,7 +283,9 @@ export const AIModeDropdown = memo(({ compatibilityMode = false }: AIModeDropdow const isLast = index === section.configs.length - 1 && isLastSection; const isPremiumDisabled = !hasPremium && config["waveai:premium"]; const isIncompatibleDisabled = section.isIncompatible || false; - const isDisabled = isPremiumDisabled || isIncompatibleDisabled; + const isTelemetryDisabled = section.noTelemetry || false; + const isDisabled = + isPremiumDisabled || isIncompatibleDisabled || isTelemetryDisabled; const isSelected = currentMode === config.mode; return ( + -
{errorMessage}
+
+ {errorMessage} + +
); }); AIErrorMessage.displayName = "AIErrorMessage"; +const ConfigChangeModeFixer = memo(() => { + const model = WaveAIModel.getInstance(); + const telemetryEnabled = jotai.useAtomValue(getSettingsKeyAtom("telemetry:enabled")) ?? false; + const aiModeConfigs = jotai.useAtomValue(model.aiModeConfigs); + + useEffect(() => { + model.fixModeAfterConfigChange(); + }, [telemetryEnabled, aiModeConfigs, model]); + + return null; +}); + +ConfigChangeModeFixer.displayName = "ConfigChangeModeFixer"; + const AIPanelComponentInner = memo(() => { const [isDragOver, setIsDragOver] = useState(false); const [isReactDndDragOver, setIsReactDndDragOver] = useState(false); const [initialLoadDone, setInitialLoadDone] = useState(false); const model = WaveAIModel.getInstance(); const containerRef = useRef(null); - const errorMessage = jotai.useAtomValue(model.errorMessage); const isLayoutMode = jotai.useAtomValue(atoms.controlShiftDelayAtom); const showOverlayBlockNums = jotai.useAtomValue(getSettingsKeyAtom("app:showoverlayblocknums")) ?? true; const isFocused = jotai.useAtomValue(model.isWaveAIFocusedAtom); const telemetryEnabled = jotai.useAtomValue(getSettingsKeyAtom("telemetry:enabled")) ?? false; const isPanelVisible = jotai.useAtomValue(model.getPanelVisibleAtom()); + const defaultMode = jotai.useAtomValue(getSettingsKeyAtom("waveai:defaultmode")) ?? 
"waveai@balanced"; + const aiModeConfigs = jotai.useAtomValue(model.aiModeConfigs); + + const hasCustomModes = Object.keys(aiModeConfigs).some((key) => !key.startsWith("waveai@")); + const isUsingCustomMode = !defaultMode.startsWith("waveai@"); + const allowAccess = telemetryEnabled || (hasCustomModes && isUsingCustomMode); const { messages, sendMessage, status, setMessages, error, stop } = useChat({ transport: new DefaultChatTransport({ @@ -244,6 +271,7 @@ const AIPanelComponentInner = memo(() => { msg, chatid: globalStore.get(model.chatId), widgetaccess: globalStore.get(model.widgetAccessAtom), + aimode: globalStore.get(model.currentAIMode), }; if (windowType === "builder") { body.builderid = globalStore.get(atoms.builderId); @@ -331,6 +359,10 @@ const AIPanelComponentInner = memo(() => { }; const handleDragOver = (e: React.DragEvent) => { + if (!allowAccess) { + return; + } + const hasFiles = hasFilesDragged(e.dataTransfer); // Only handle native file drags here, let react-dnd handle FILE_ITEM drags @@ -347,6 +379,10 @@ const AIPanelComponentInner = memo(() => { }; const handleDragEnter = (e: React.DragEvent) => { + if (!allowAccess) { + return; + } + const hasFiles = hasFilesDragged(e.dataTransfer); // Only handle native file drags here, let react-dnd handle FILE_ITEM drags @@ -361,6 +397,10 @@ const AIPanelComponentInner = memo(() => { }; const handleDragLeave = (e: React.DragEvent) => { + if (!allowAccess) { + return; + } + const hasFiles = hasFilesDragged(e.dataTransfer); // Only handle native file drags here, let react-dnd handle FILE_ITEM drags @@ -382,6 +422,13 @@ const AIPanelComponentInner = memo(() => { }; const handleDrop = async (e: React.DragEvent) => { + if (!allowAccess) { + e.preventDefault(); + e.stopPropagation(); + setIsDragOver(false); + return; + } + // Check if this is a FILE_ITEM drag from react-dnd // If so, let react-dnd handle it instead if (!e.dataTransfer.files.length) { @@ -415,8 +462,13 @@ const AIPanelComponentInner = memo(() => { 
}; const handleFileItemDrop = useCallback( - (draggedFile: DraggedFile) => model.addFileFromRemoteUri(draggedFile), - [model] + (draggedFile: DraggedFile) => { + if (!allowAccess) { + return; + } + model.addFileFromRemoteUri(draggedFile); + }, + [model, allowAccess] ); const [{ isOver, canDrop }, drop] = useDrop( @@ -501,13 +553,14 @@ const AIPanelComponentInner = memo(() => { onClick={handleClick} inert={!isPanelVisible ? true : undefined} > - {(isDragOver || isReactDndDragOver) && } + + {(isDragOver || isReactDndDragOver) && allowAccess && } {showBlockMask && }
- {!telemetryEnabled ? ( + {!allowAccess ? ( ) : ( <> @@ -528,9 +581,7 @@ const AIPanelComponentInner = memo(() => { onContextMenu={(e) => handleWaveAIContextMenu(e, true)} /> )} - {errorMessage && ( - model.clearError()} /> - )} + diff --git a/frontend/app/aipanel/telemetryrequired.tsx b/frontend/app/aipanel/telemetryrequired.tsx index b0043c3ad2..692dec73d5 100644 --- a/frontend/app/aipanel/telemetryrequired.tsx +++ b/frontend/app/aipanel/telemetryrequired.tsx @@ -55,11 +55,23 @@ const TelemetryRequiredMessage = ({ className }: TelemetryRequiredMessageProps) This helps us block abuse by automated systems and ensure it's used by real people like you.

-

+

We never collect your files, prompts, keystrokes, hostnames, or personally identifying information. Wave AI is powered by OpenAI's APIs, please refer to OpenAI's privacy policy for details on how they handle your data.

+

+ For information about BYOK and local model support, see{" "} + + https://docs.waveterm.dev/waveai-modes + + . +