From 87f330558b89706e618deb82704b4432088a0692 Mon Sep 17 00:00:00 2001 From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com> Date: Sat, 7 Mar 2026 13:30:25 -0300 Subject: [PATCH 1/7] feat: multiple openai-compatible providers --- .../components/ProjectPage/ProjectPage.tsx | 11 +- src/browser/features/ChatInput/index.tsx | 27 +- .../Settings/Sections/ModelsSection.tsx | 127 ++++- .../OpenAICompatibleProvidersSection.tsx | 531 ++++++++++++++++++ .../Settings/Sections/ProvidersSection.tsx | 22 +- .../Settings/Sections/System1Section.tsx | 4 +- .../hooks/useModelsFromSettings.test.ts | 4 +- src/browser/hooks/useModelsFromSettings.ts | 66 ++- .../hooks/useOpenAICompatibleProviders.ts | 68 +++ src/browser/stories/mocks/orpc.ts | 6 + .../schemas/openaiCompatibleProvider.ts | 39 ++ src/common/config/schemas/providersConfig.ts | 7 + src/common/orpc/schemas.ts | 4 + src/common/orpc/schemas/api.ts | 105 ++++ src/common/orpc/types.ts | 6 + .../utils/providers/modelEntries.test.ts | 146 +++++ src/node/orpc/router.ts | 30 + src/node/services/providerModelFactory.ts | 75 ++- src/node/services/providerService.ts | 222 ++++++++ 19 files changed, 1445 insertions(+), 55 deletions(-) create mode 100644 src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx create mode 100644 src/browser/hooks/useOpenAICompatibleProviders.ts create mode 100644 src/common/config/schemas/openaiCompatibleProvider.ts diff --git a/src/browser/components/ProjectPage/ProjectPage.tsx b/src/browser/components/ProjectPage/ProjectPage.tsx index bf02d41a9e..900278967b 100644 --- a/src/browser/components/ProjectPage/ProjectPage.tsx +++ b/src/browser/components/ProjectPage/ProjectPage.tsx @@ -14,6 +14,7 @@ import { GitInitBanner } from "../GitInitBanner/GitInitBanner"; import { ConfiguredProvidersBar } from "../ConfiguredProvidersBar/ConfiguredProvidersBar"; import { ConfigureProvidersPrompt } from "../ConfigureProvidersPrompt/ConfigureProvidersPrompt"; import { useProvidersConfig } 
from "@/browser/hooks/useProvidersConfig"; +import { useOpenAICompatibleProviders } from "@/browser/hooks/useOpenAICompatibleProviders"; import type { ProvidersConfigMap } from "@/common/orpc/types"; import { AgentsInitBanner } from "../AgentsInitBanner/AgentsInitBanner"; import { @@ -60,7 +61,12 @@ function archivedListsEqual( } /** Check if any provider is configured (uses backend-computed isConfigured) */ -function hasConfiguredProvider(config: ProvidersConfigMap | null): boolean { +function hasConfiguredProvider( + config: ProvidersConfigMap | null, + openaiCompatibleConfig: { isConfigured: boolean } | null +): boolean { + if (!config && !openaiCompatibleConfig) return false; + if (openaiCompatibleConfig?.isConfigured) return true; if (!config) return false; return Object.values(config).some((provider) => provider?.isConfigured); } @@ -91,7 +97,8 @@ export const ProjectPage: React.FC = ({ { listener: true } ); const { config: providersConfig, loading: providersLoading } = useProvidersConfig(); - const hasProviders = hasConfiguredProvider(providersConfig); + const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); + const hasProviders = hasConfiguredProvider(providersConfig, openaiCompatibleConfig); const shouldShowAgentsInitBanner = !providersLoading && hasProviders && showAgentsInitNudge; // Git repository state for the banner diff --git a/src/browser/features/ChatInput/index.tsx b/src/browser/features/ChatInput/index.tsx index d12a32881b..41d8fd3c52 100644 --- a/src/browser/features/ChatInput/index.tsx +++ b/src/browser/features/ChatInput/index.tsx @@ -28,6 +28,7 @@ import { } from "@/browser/utils/policyUi"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { useAPI } from "@/browser/contexts/API"; +import { isRegularProviderConfigInfo } from "@/common/orpc/schemas/api"; import { useThinkingLevel } from "@/browser/hooks/useThinkingLevel"; import { normalizeSelectedModel } from "@/common/utils/ai/models"; import { 
useSendMessageOptions } from "@/browser/hooks/useSendMessageOptions"; @@ -1400,10 +1401,28 @@ const ChatInputInner: React.FC = (props) => { try { const config = await api.providers.getConfig(); if (!signal.aborted) { - setOpenAIKeySet(config?.openai?.apiKeySet ?? false); - setOpenAIProviderEnabled(config?.openai?.isEnabled ?? true); - setMuxGatewayCouponSet(config?.["mux-gateway"]?.couponCodeSet ?? false); - setMuxGatewayEnabled(config?.["mux-gateway"]?.isEnabled ?? true); + const openaiConfig = config?.openai; + const muxGatewayConfig = config?.["mux-gateway"]; + setOpenAIKeySet( + openaiConfig && isRegularProviderConfigInfo(openaiConfig) + ? openaiConfig.apiKeySet + : false + ); + setOpenAIProviderEnabled( + openaiConfig && isRegularProviderConfigInfo(openaiConfig) + ? openaiConfig.isEnabled + : true + ); + setMuxGatewayCouponSet( + muxGatewayConfig && isRegularProviderConfigInfo(muxGatewayConfig) + ? (muxGatewayConfig.couponCodeSet ?? false) + : false + ); + setMuxGatewayEnabled( + muxGatewayConfig && isRegularProviderConfigInfo(muxGatewayConfig) + ? 
muxGatewayConfig.isEnabled + : true + ); } } catch { // Ignore errors fetching config diff --git a/src/browser/features/Settings/Sections/ModelsSection.tsx b/src/browser/features/Settings/Sections/ModelsSection.tsx index b7e21fc627..c4cacb5818 100644 --- a/src/browser/features/Settings/Sections/ModelsSection.tsx +++ b/src/browser/features/Settings/Sections/ModelsSection.tsx @@ -16,6 +16,7 @@ import { useModelsFromSettings } from "@/browser/hooks/useModelsFromSettings"; import { useRouting } from "@/browser/hooks/useRouting"; import { usePersistedState } from "@/browser/hooks/usePersistedState"; import { useProvidersConfig } from "@/browser/hooks/useProvidersConfig"; +import { useOpenAICompatibleProviders } from "@/browser/hooks/useOpenAICompatibleProviders"; import { KNOWN_MODELS } from "@/common/constants/knownModels"; import { isCodexOauthRequiredModelId } from "@/common/constants/codexOAuth"; import { usePolicy } from "@/browser/contexts/PolicyContext"; @@ -27,6 +28,7 @@ import { import { getAllowedProvidersForUi, isModelAllowedByPolicy } from "@/browser/utils/policyUi"; import { LAST_CUSTOM_MODEL_PROVIDER_KEY } from "@/common/constants/storage"; import type { ProviderModelEntry } from "@/common/orpc/types"; +import { isRegularProviderConfigInfo } from "@/common/orpc/schemas/api"; import { getProviderModelEntryContextWindowTokens, getProviderModelEntryId, @@ -35,7 +37,7 @@ import { import { ModelRow } from "./ModelRow"; // Providers to exclude from the custom models UI (handled specially or internal) -const HIDDEN_PROVIDERS = new Set(["mux-gateway"]); +const HIDDEN_PROVIDERS = new Set(["mux-gateway", "openai-compatible"]); // Shared header cell styles const headerCellBase = "py-1.5 pr-2 text-xs font-medium text-muted"; @@ -61,6 +63,8 @@ interface EditingState { contextWindowTokens: string; mappedToModel: string; focus?: "model" | "context"; + /** Instance ID for openai-compatible providers (e.g., "together-ai") */ + instanceId?: string; } function 
parseContextWindowTokensInput(value: string): number | null { @@ -128,6 +132,7 @@ export function ModelsSection() { const { api } = useAPI(); const { open: openSettings } = useSettings(); const { config, loading, updateModelsOptimistically } = useProvidersConfig(); + const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); const [lastProvider, setLastProvider] = usePersistedState(LAST_CUSTOM_MODEL_PROVIDER_KEY, ""); const [newModelId, setNewModelId] = useState(""); const [editing, setEditing] = useState(null); @@ -143,7 +148,11 @@ export function ModelsSection() { // Read OAuth state from this component's provider config source to avoid // cross-hook timing mismatches while settings are loading/refetching. - const codexOauthConfigured = config?.openai?.codexOauthSet === true; + const openaiConfig = config?.openai; + const codexOauthConfigured = + openaiConfig && isRegularProviderConfigInfo(openaiConfig) + ? openaiConfig.codexOauthSet === true + : false; // "Treat as" dropdown should only list known models — custom models don't have // the metadata (pricing, context window, tokenizer) that mapping inherits. @@ -156,8 +165,12 @@ export function ModelsSection() { const modelExists = useCallback( (provider: string, modelId: string, excludeOriginal?: string): boolean => { if (!config) return false; - const currentModels = config[provider]?.models ?? []; - return currentModels.some((entry) => { + const providerConfig = config[provider]; + const currentModels = + providerConfig && isRegularProviderConfigInfo(providerConfig) + ? (providerConfig.models ?? 
[]) + : []; + return currentModels.some((entry: ProviderModelEntry) => { const currentModelId = getProviderModelEntryId(entry); return currentModelId === modelId && currentModelId !== excludeOriginal; }); @@ -215,7 +228,8 @@ export function ModelsSection() { provider: string, modelId: string, contextWindowTokens: number | null, - mappedToModel: string | null + mappedToModel: string | null, + instanceId?: string ) => { setEditing({ provider, @@ -224,6 +238,7 @@ export function ModelsSection() { contextWindowTokens: contextWindowTokens === null ? "" : String(contextWindowTokens), mappedToModel: mappedToModel ?? "", focus: "model", + instanceId, }); setError(null); }, @@ -235,7 +250,8 @@ export function ModelsSection() { provider: string, modelId: string, contextWindowTokens: number | null, - mappedToModel: string | null + mappedToModel: string | null, + instanceId?: string ) => { setEditing({ provider, @@ -244,6 +260,7 @@ export function ModelsSection() { contextWindowTokens: contextWindowTokens === null ? "" : String(contextWindowTokens), mappedToModel: mappedToModel ?? "", focus: "context", + instanceId, }); setError(null); }, @@ -288,32 +305,67 @@ export function ModelsSection() { mappedTo ); - // Optimistic update - returns new models array for API call - const updatedModels = updateModelsOptimistically(editing.provider, (models) => { - const nextModels: ProviderModelEntry[] = []; - let replaced = false; + const isOpenAICompatible = editing.provider === "openai-compatible" && editing.instanceId; - for (const modelEntry of models) { - if (!replaced && getProviderModelEntryId(modelEntry) === editing.originalModelId) { + // Optimistic update + if (isOpenAICompatible) { + // For openai-compatible, we need to update via the provider's models array + const providerConfig = openaiCompatibleConfig?.providers?.find( + (p) => p.id === editing.instanceId + ); + if (providerConfig) { + const currentModels = providerConfig.models ?? 
[]; + const nextModels: ProviderModelEntry[] = []; + let replaced = false; + + for (const modelEntry of currentModels) { + if (!replaced && getProviderModelEntryId(modelEntry) === editing.originalModelId) { + nextModels.push(replacementEntry); + replaced = true; + continue; + } + nextModels.push(modelEntry); + } + + if (!replaced) { nextModels.push(replacementEntry); - replaced = true; - continue; } - nextModels.push(modelEntry); + // Save in background + void api.openaiCompatibleProviders.setModels({ + instanceId: editing.instanceId!, + models: nextModels, + }); } + } else { + // Regular provider + const updatedModels = updateModelsOptimistically(editing.provider, (models) => { + const nextModels: ProviderModelEntry[] = []; + let replaced = false; + + for (const modelEntry of models) { + if (!replaced && getProviderModelEntryId(modelEntry) === editing.originalModelId) { + nextModels.push(replacementEntry); + replaced = true; + continue; + } + + nextModels.push(modelEntry); + } - if (!replaced) { - nextModels.push(replacementEntry); - } + if (!replaced) { + nextModels.push(replacementEntry); + } - return nextModels; - }); - setEditing(null); + return nextModels; + }); - // Save in background - void api.providers.setModels({ provider: editing.provider, models: updatedModels }); - }, [api, editing, config, modelExists, updateModelsOptimistically]); + // Save in background + void api.providers.setModels({ provider: editing.provider, models: updatedModels }); + } + + setEditing(null); + }, [api, editing, config, modelExists, updateModelsOptimistically, openaiCompatibleConfig]); // Show loading state while config is being fetched if (loading || !config) { @@ -332,6 +384,7 @@ export function ModelsSection() { fullId: string; contextWindowTokens: number | null; mappedToModel: string | null; + instanceId?: string; }> => { const models: Array<{ provider: string; @@ -339,11 +392,13 @@ export function ModelsSection() { fullId: string; contextWindowTokens: number | null; 
mappedToModel: string | null; + instanceId?: string; }> = []; for (const [provider, providerConfig] of Object.entries(config)) { // Skip hidden providers (mux-gateway models are routed, not managed as a standalone list) if (HIDDEN_PROVIDERS.has(provider)) continue; + if (!isRegularProviderConfigInfo(providerConfig)) continue; if (!providerConfig.models) continue; for (const modelEntry of providerConfig.models) { @@ -358,6 +413,24 @@ export function ModelsSection() { } } + // Add OpenAI-compatible provider models + if (openaiCompatibleConfig?.providers) { + for (const provider of openaiCompatibleConfig.providers) { + if (!provider.models) continue; + for (const modelEntry of provider.models) { + const modelId = getProviderModelEntryId(modelEntry); + models.push({ + provider: "openai-compatible", + modelId, + fullId: `openai-compatible:${provider.id}:${modelId}`, + contextWindowTokens: getProviderModelEntryContextWindowTokens(modelEntry), + mappedToModel: getProviderModelEntryMappedTo(modelEntry), + instanceId: provider.id, + }); + } + } + } + return models; }; @@ -469,7 +542,8 @@ export function ModelsSection() { model.provider, model.modelId, model.contextWindowTokens, - model.mappedToModel + model.mappedToModel, + model.instanceId ) } onStartContextEdit={() => @@ -477,7 +551,8 @@ export function ModelsSection() { model.provider, model.modelId, model.contextWindowTokens, - model.mappedToModel + model.mappedToModel, + model.instanceId ) } onSaveEdit={handleSaveEdit} diff --git a/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx b/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx new file mode 100644 index 0000000000..0fa845c4c9 --- /dev/null +++ b/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx @@ -0,0 +1,531 @@ +import { useCallback, useState } from "react"; +import { Check, ChevronDown, ChevronRight, Loader2, Plus, Trash2, X } from "lucide-react"; + +import type { 
OpenAICompatibleInstanceInfo, ProviderModelEntry } from "@/common/orpc/types"; +import { useAPI } from "@/browser/contexts/API"; +import { useOpenAICompatibleProviders } from "@/browser/hooks/useOpenAICompatibleProviders"; +import { Button } from "@/browser/components/Button/Button"; +import { getProviderModelEntryId } from "@/common/utils/providers/modelEntries"; + +interface OpenAICompatibleProvidersSectionProps { + isExpanded?: boolean; + onToggle?: () => void; +} + +export function OpenAICompatibleProvidersSection({ + isExpanded: propIsExpanded, + onToggle: propOnToggle, +}: OpenAICompatibleProvidersSectionProps) { + const { api } = useAPI(); + const { config, refresh } = useOpenAICompatibleProviders(); + + const [internalExpanded, setInternalExpanded] = useState(false); + const isExpanded = propIsExpanded ?? internalExpanded; + const onToggle = propOnToggle ?? (() => setInternalExpanded((v) => !v)); + + const providers = config?.providers ?? []; + const isConfigured = config?.isConfigured ?? false; + + const statusDotColor = isConfigured ? "bg-success" : "bg-border-medium"; + const statusDotTitle = isConfigured ? "Configured" : "Not configured"; + + const [isAdding, setIsAdding] = useState(false); + const [editingId, setEditingId] = useState(null); + const [saving, setSaving] = useState(false); + const [error, setError] = useState(null); + + const [newProvider, setNewProvider] = useState({ + id: "", + name: "", + baseUrl: "", + apiKey: "", + }); + + const [editProvider, setEditProvider] = useState({ + name: "", + baseUrl: "", + apiKey: "", + }); + + const [addingModelTo, setAddingModelTo] = useState(null); + const [newModelId, setNewModelId] = useState(""); + + const handleAddModel = useCallback( + async (instanceId: string) => { + if (!api || !newModelId.trim()) return; + + const provider = providers.find((p) => p.id === instanceId); + if (!provider) return; + + const currentModels = provider.models ?? 
[]; + const existingIds = currentModels.map((m) => getProviderModelEntryId(m)); + + if (existingIds.includes(newModelId.trim())) { + setError("Model already exists"); + return; + } + + setSaving(true); + setError(null); + + try { + const newModel: ProviderModelEntry = { id: newModelId.trim() }; + const result = await api.openaiCompatibleProviders.setModels({ + instanceId, + models: [...currentModels, newModel], + }); + + if (!result.success) { + setError(result.error); + setSaving(false); + return; + } + + setAddingModelTo(null); + setNewModelId(""); + await refresh(); + } catch (err) { + setError(err instanceof Error ? err.message : "Failed to add model"); + } finally { + setSaving(false); + } + }, + [api, newModelId, providers, refresh] + ); + + const handleRemoveModel = useCallback( + async (instanceId: string, modelId: string) => { + if (!api) return; + + const provider = providers.find((p) => p.id === instanceId); + if (!provider) return; + + const currentModels = provider.models ?? []; + const newModels = currentModels.filter((m) => getProviderModelEntryId(m) !== modelId); + + setSaving(true); + setError(null); + + try { + const result = await api.openaiCompatibleProviders.setModels({ + instanceId, + models: newModels, + }); + + if (!result.success) { + setError(result.error); + } else { + await refresh(); + } + } catch (err) { + setError(err instanceof Error ? 
err.message : "Failed to remove model"); + } finally { + setSaving(false); + } + }, + [api, providers, refresh] + ); + + const handleAddProvider = useCallback(async () => { + if (!api || !newProvider.id.trim() || !newProvider.name.trim() || !newProvider.baseUrl.trim()) { + setError("Provider ID, name, and base URL are required"); + return; + } + + setSaving(true); + setError(null); + + try { + const result = await api.openaiCompatibleProviders.addProvider({ + id: newProvider.id.trim(), + name: newProvider.name.trim(), + baseUrl: newProvider.baseUrl.trim(), + apiKey: newProvider.apiKey.trim() || undefined, + }); + + if (!result.success) { + setError(result.error); + setSaving(false); + return; + } + + setNewProvider({ id: "", name: "", baseUrl: "", apiKey: "" }); + setIsAdding(false); + await refresh(); + } catch (err) { + setError(err instanceof Error ? err.message : "Failed to add provider"); + } finally { + setSaving(false); + } + }, [api, newProvider, refresh]); + + const handleUpdateProvider = useCallback( + async (instanceId: string) => { + if (!api || !editProvider.name.trim() || !editProvider.baseUrl.trim()) { + setError("Name and base URL are required"); + return; + } + + setSaving(true); + setError(null); + + try { + const result = await api.openaiCompatibleProviders.updateProvider({ + instanceId, + updates: { + name: editProvider.name.trim(), + baseUrl: editProvider.baseUrl.trim(), + apiKey: editProvider.apiKey.trim() || undefined, + }, + }); + + if (!result.success) { + setError(result.error); + setSaving(false); + return; + } + + setEditingId(null); + setEditProvider({ name: "", baseUrl: "", apiKey: "" }); + await refresh(); + } catch (err) { + setError(err instanceof Error ? 
err.message : "Failed to update provider"); + } finally { + setSaving(false); + } + }, + [api, editProvider, refresh] + ); + + const handleDeleteProvider = useCallback( + async (instanceId: string) => { + if (!api) return; + + setSaving(true); + setError(null); + + try { + const result = await api.openaiCompatibleProviders.removeProvider({ instanceId }); + + if (!result.success) { + setError(result.error); + setSaving(false); + return; + } + + await refresh(); + } catch (err) { + setError(err instanceof Error ? err.message : "Failed to remove provider"); + } finally { + setSaving(false); + } + }, + [api, refresh] + ); + + const startEditing = (provider: OpenAICompatibleInstanceInfo) => { + setEditingId(provider.id); + setEditProvider({ + name: provider.name, + baseUrl: provider.baseUrl, + apiKey: "", + }); + setError(null); + }; + + const cancelEditing = () => { + setEditingId(null); + setEditProvider({ name: "", baseUrl: "", apiKey: "" }); + setError(null); + }; + + return ( +
+ + + {isExpanded && ( +
+

+ Configure OpenAI-compatible API endpoints (Together AI, Fireworks, LM Studio, etc.). + Models are accessed via{" "} + openai-compatible:provider-id:model-name. +

+ + {error && ( +
{error}
+ )} + + {providers.map((provider: OpenAICompatibleInstanceInfo) => ( +
+ {editingId === provider.id ? ( +
+
+ + setEditProvider({ ...editProvider, name: e.target.value })} + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 text-xs focus:outline-none" + placeholder="Together AI" + /> +
+
+ + + setEditProvider({ ...editProvider, baseUrl: e.target.value }) + } + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 font-mono text-xs focus:outline-none" + placeholder="https://api.together.xyz/v1" + /> +
+
+ + setEditProvider({ ...editProvider, apiKey: e.target.value })} + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 font-mono text-xs focus:outline-none" + placeholder="Leave empty to keep current key" + /> +
+
+ + +
+
+ ) : ( +
+
+
{provider.name}
+
{provider.baseUrl}
+
+ ID: {provider.id} + {" • "} + {provider.apiKeySet ? "API key set" : "No API key"} + {" • "} + {provider.isEnabled ? "Enabled" : "Disabled"} +
+ + {/* Models section */} +
+
Models:
+ {provider.models && provider.models.length > 0 ? ( +
+ {provider.models.map((model) => ( + + {getProviderModelEntryId(model)} + + + ))} +
+ ) : ( +
No models configured
+ )} + + {addingModelTo === provider.id ? ( +
+ setNewModelId(e.target.value)} + placeholder="model-id" + className="bg-background border-border-medium focus:border-accent rounded border px-2 py-0.5 text-xs" + onKeyDown={(e) => { + if (e.key === "Enter") void handleAddModel(provider.id); + if (e.key === "Escape") { + setAddingModelTo(null); + setNewModelId(""); + } + }} + /> + + +
+ ) : ( + + )} +
+
+
+ + +
+
+ )} +
+ ))} + + {isAdding ? ( +
+
+
+ + setNewProvider({ ...newProvider, id: e.target.value })} + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 font-mono text-xs focus:outline-none" + placeholder="together-ai" + /> +

+ Used in model strings: openai-compatible: + together-ai:model-id +

+
+
+ + setNewProvider({ ...newProvider, name: e.target.value })} + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 text-xs focus:outline-none" + placeholder="Together AI" + /> +
+
+ + setNewProvider({ ...newProvider, baseUrl: e.target.value })} + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 font-mono text-xs focus:outline-none" + placeholder="https://api.together.xyz/v1" + /> +
+
+ + setNewProvider({ ...newProvider, apiKey: e.target.value })} + className="bg-background border-border-medium focus:border-accent w-full rounded border px-2 py-1.5 font-mono text-xs focus:outline-none" + placeholder="Enter API key" + /> +
+
+ + +
+
+
+ ) : ( + + )} +
+ )} +
+ ); +} diff --git a/src/browser/features/Settings/Sections/ProvidersSection.tsx b/src/browser/features/Settings/Sections/ProvidersSection.tsx index 6ee12fffed..f02da05d9e 100644 --- a/src/browser/features/Settings/Sections/ProvidersSection.tsx +++ b/src/browser/features/Settings/Sections/ProvidersSection.tsx @@ -31,6 +31,9 @@ import { CSS } from "@dnd-kit/utilities"; import { createEditKeyHandler } from "@/browser/utils/ui/keybinds"; import { getBrowserBackendBaseUrl } from "@/browser/utils/backendBaseUrl"; import { PROVIDER_DEFINITIONS, type ProviderName } from "@/common/constants/providers"; +import type { ProvidersConfigMap } from "@/common/orpc/types"; +import { isRegularProviderConfigInfo } from "@/common/orpc/schemas/api"; +import { OpenAICompatibleProvidersSection } from "./OpenAICompatibleProvidersSection"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { getAllowedProvidersForUi } from "@/browser/utils/policyUi"; import { ProviderWithIcon } from "@/browser/components/ProviderIcon/ProviderIcon"; @@ -380,10 +383,21 @@ export function ProvidersSection() { ); const [codexOauthAuthorizeUrl, setCodexOauthAuthorizeUrl] = useState(null); - const codexOauthIsConnected = config?.openai?.codexOauthSet === true; - const openaiApiKeySet = config?.openai?.apiKeySet === true; + const openaiConfig = config?.openai; + const codexOauthIsConnected = + openaiConfig && isRegularProviderConfigInfo(openaiConfig) + ? openaiConfig.codexOauthSet === true + : false; + const openaiApiKeySet = + openaiConfig && isRegularProviderConfigInfo(openaiConfig) + ? openaiConfig.apiKeySet === true + : false; const codexOauthDefaultAuth = - config?.openai?.codexOauthDefaultAuth === "apiKey" ? "apiKey" : "oauth"; + openaiConfig && isRegularProviderConfigInfo(openaiConfig) + ? openaiConfig.codexOauthDefaultAuth === "apiKey" + ? 
"apiKey" + : "oauth" + : "oauth"; const codexOauthDefaultAuthIsEditable = codexOauthIsConnected && openaiApiKeySet; const codexOauthLoginInProgress = @@ -2200,6 +2214,8 @@ export function ProvidersSection() { ); })} + + {config && !hasAnyConfiguredProvider && (
No providers are currently enabled. You won't be able to send messages until you diff --git a/src/browser/features/Settings/Sections/System1Section.tsx b/src/browser/features/Settings/Sections/System1Section.tsx index c6ca010d11..3485344c17 100644 --- a/src/browser/features/Settings/Sections/System1Section.tsx +++ b/src/browser/features/Settings/Sections/System1Section.tsx @@ -14,6 +14,7 @@ import { useAPI } from "@/browser/contexts/API"; import { useOptionalWorkspaceContext } from "@/browser/contexts/WorkspaceContext"; import { getDefaultModel, getSuggestedModels } from "@/browser/hooks/useModelsFromSettings"; import { useProvidersConfig } from "@/browser/hooks/useProvidersConfig"; +import { useOpenAICompatibleProviders } from "@/browser/hooks/useOpenAICompatibleProviders"; import { usePersistedState } from "@/browser/hooks/usePersistedState"; import { getModelKey, @@ -39,6 +40,7 @@ import { getErrorMessage } from "@/common/utils/errors"; export function System1Section() { const { api } = useAPI(); const { config: providersConfig, loading: providersLoading } = useProvidersConfig(); + const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); const [taskSettings, setTaskSettings] = useState(DEFAULT_TASK_SETTINGS); const [loaded, setLoaded] = useState(false); @@ -287,7 +289,7 @@ export function System1Section() { ); } - const allModels = getSuggestedModels(providersConfig); + const allModels = getSuggestedModels(providersConfig, openaiCompatibleConfig ?? null); const bashOutputCompactionMinLines = taskSettings.bashOutputCompactionMinLines ?? 
diff --git a/src/browser/hooks/useModelsFromSettings.test.ts b/src/browser/hooks/useModelsFromSettings.test.ts index 37c5c8dc42..3281c688d2 100644 --- a/src/browser/hooks/useModelsFromSettings.test.ts +++ b/src/browser/hooks/useModelsFromSettings.test.ts @@ -105,7 +105,7 @@ describe("getSuggestedModels", () => { }, }; - const suggested = getSuggestedModels(config); + const suggested = getSuggestedModels(config, null); // Custom models are listed first (in config order) expect(suggested[0]).toBe("openai:my-team-model"); @@ -134,7 +134,7 @@ describe("getSuggestedModels", () => { }, }; - const suggested = getSuggestedModels(config); + const suggested = getSuggestedModels(config, null); expect(suggested).toContain("anthropic:enabled-custom"); expect(suggested).not.toContain("openai:disabled-custom"); diff --git a/src/browser/hooks/useModelsFromSettings.ts b/src/browser/hooks/useModelsFromSettings.ts index 4c20450905..20a8f0fe96 100644 --- a/src/browser/hooks/useModelsFromSettings.ts +++ b/src/browser/hooks/useModelsFromSettings.ts @@ -8,6 +8,7 @@ import { import { WORKSPACE_DEFAULTS } from "@/constants/workspaceDefaults"; import { useProvidersConfig } from "./useProvidersConfig"; import { useRouting } from "./useRouting"; +import { useOpenAICompatibleProviders } from "./useOpenAICompatibleProviders"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { useAPI } from "@/browser/contexts/API"; import { isValidProvider } from "@/common/constants/providers"; @@ -19,6 +20,10 @@ import { } from "@/common/utils/ai/models"; import { isModelAvailable } from "@/common/routing"; import type { ProviderModelEntry, ProvidersConfigMap } from "@/common/orpc/types"; +import { getModelProvider } from "@/common/utils/ai/models"; +import type { + OpenAICompatibleProvidersInfo, +} from "@/common/orpc/types"; import { DEFAULT_MODEL_KEY, HIDDEN_MODELS_KEY } from "@/common/constants/storage"; import { getProviderModelEntryId } from "@/common/utils/providers/modelEntries"; @@ 
-26,20 +31,42 @@ import { getProviderModelEntryId } from "@/common/utils/providers/modelEntries"; const BUILT_IN_MODELS: string[] = Object.values(KNOWN_MODELS).map((m) => m.id); const BUILT_IN_MODEL_SET = new Set(BUILT_IN_MODELS); -function getCustomModels(config: ProvidersConfigMap | null): string[] { - if (!config) return []; +function getCustomModels( + config: ProvidersConfigMap | null, + openaiCompatibleConfig: OpenAICompatibleProvidersInfo | null +): string[] { + if (!config && !openaiCompatibleConfig) return []; const models: string[] = []; - for (const [provider, info] of Object.entries(config)) { - // Skip mux-gateway - those models are accessed via the cloud toggle, not listed separately - if (provider === "mux-gateway") continue; - // Only surface custom models from enabled providers - if (!info.isEnabled) continue; - if (!info.models) continue; - for (const modelEntry of info.models) { - const modelId = getProviderModelEntryId(modelEntry); - models.push(`${provider}:${modelId}`); + + // Get models from regular providers + if (config) { + for (const [provider, info] of Object.entries(config)) { + // Skip mux-gateway - those models are accessed via the cloud toggle, not listed separately + if (provider === "mux-gateway") continue; + // Skip openai-compatible - handled separately + if (provider === "openai-compatible") continue; + // Only surface custom models from enabled providers + if (!info.isEnabled) continue; + if (!info.models) continue; + for (const modelEntry of info.models) { + const modelId = getProviderModelEntryId(modelEntry); + models.push(`${provider}:${modelId}`); + } } } + + // Get models from OpenAI-compatible providers + if (openaiCompatibleConfig?.providers) { + for (const provider of openaiCompatibleConfig.providers) { + if (!provider.isEnabled) continue; + if (!provider.models) continue; + for (const modelEntry of provider.models) { + const modelId = getProviderModelEntryId(modelEntry); + 
models.push(`openai-compatible:${provider.id}:${modelId}`); + } + } + } + return models; } @@ -80,8 +107,11 @@ function dedupeKeepFirst(models: string[]): string[] { return out; } -export function getSuggestedModels(config: ProvidersConfigMap | null): string[] { - const customModels = getCustomModels(config); +export function getSuggestedModels( + config: ProvidersConfigMap | null, + openaiCompatibleConfig: OpenAICompatibleProvidersInfo | null +): string[] { + const customModels = getCustomModels(config, openaiCompatibleConfig); return dedupeKeepFirst([...customModels, ...BUILT_IN_MODELS]); } @@ -121,6 +151,7 @@ export function useModelsFromSettings() { ); const { config, refresh } = useProvidersConfig(); const { routePriority, routeOverrides } = useRouting(); + const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); const [defaultModel, setDefaultModel] = usePersistedState( DEFAULT_MODEL_KEY, @@ -156,9 +187,9 @@ export function useModelsFromSettings() { ); const customModels = useMemo(() => { - const next = filterHiddenModels(getCustomModels(config), hiddenModels); + const next = filterHiddenModels(getCustomModels(config, openaiCompatibleConfig), hiddenModels); return effectivePolicy ? next.filter((m) => isModelAllowedByPolicy(effectivePolicy, m)) : next; - }, [config, hiddenModels, effectivePolicy]); + }, [config, openaiCompatibleConfig, hiddenModels, effectivePolicy]); const openaiApiKeySet = config === null ? null : config.openai?.apiKeySet === true; const codexOauthSet = config === null ? null : config.openai?.codexOauthSet === true; @@ -217,7 +248,10 @@ export function useModelsFromSettings() { ); const models = useMemo(() => { - const suggested = filterHiddenModels(getSuggestedModels(config), hiddenModels); + const suggested = filterHiddenModels( + getSuggestedModels(config, openaiCompatibleConfig), + hiddenModels + ); // Hide models that are unavailable from both direct and gateway routes. 
// Keep all models visible while provider config is still loading to avoid UI flicker.
diff --git a/src/browser/hooks/useOpenAICompatibleProviders.ts b/src/browser/hooks/useOpenAICompatibleProviders.ts
new file mode 100644
index 0000000000..5fda50ca06
--- /dev/null
+++ b/src/browser/hooks/useOpenAICompatibleProviders.ts
@@ -0,0 +1,68 @@
+import { useEffect, useState, useCallback, useRef } from "react";
+import { useAPI } from "@/browser/contexts/API";
+import type { OpenAICompatibleProvidersInfo } from "@/common/orpc/types";
+
+export function useOpenAICompatibleProviders() {
+  const { api } = useAPI();
+  const [config, setConfig] = useState<OpenAICompatibleProvidersInfo | null>(null);
+  const [loading, setLoading] = useState(true);
+
+  const configRef = useRef<OpenAICompatibleProvidersInfo | null>(null);
+  const fetchVersionRef = useRef(0);
+
+  const refresh = useCallback(async () => {
+    if (!api) return;
+    const myVersion = ++fetchVersionRef.current;
+    try {
+      const cfg = await api.openaiCompatibleProviders.getConfig();
+      if (myVersion === fetchVersionRef.current) {
+        configRef.current = cfg;
+        setConfig(cfg);
+      }
+    } catch {
+      // Ignore errors fetching config
+    } finally {
+      if (myVersion === fetchVersionRef.current) {
+        setLoading(false);
+      }
+    }
+  }, [api]);
+
+  useEffect(() => {
+    if (!api) return;
+
+    const abortController = new AbortController();
+    const { signal } = abortController;
+
+    let iterator: AsyncIterator<unknown> | null = null;
+
+    void refresh();
+
+    (async () => {
+      try {
+        const subscribedIterator = await api.providers.onConfigChanged(undefined, { signal });
+
+        if (signal.aborted) {
+          void subscribedIterator.return?.();
+          return;
+        }
+
+        iterator = subscribedIterator;
+
+        for await (const _ of subscribedIterator) {
+          if (signal.aborted) break;
+          void refresh();
+        }
+      } catch {
+        // Subscription cancelled
+      }
+    })();
+
+    return () => {
+      abortController.abort();
+      void iterator?.return?.();
+    };
+  }, [api, refresh]);
+
+  return { config, loading, refresh };
+}
diff --git a/src/browser/stories/mocks/orpc.ts 
b/src/browser/stories/mocks/orpc.ts
index b5d690c596..987d6d2757 100644
--- a/src/browser/stories/mocks/orpc.ts
+++ b/src/browser/stories/mocks/orpc.ts
@@ -953,6 +953,13 @@ export function createMockORPCClient(options: MockORPCClientOptions = {}): APICl
       setProviderConfig: () => Promise.resolve({ success: true, data: undefined }),
       setModels: () => Promise.resolve({ success: true, data: undefined }),
     },
+    openaiCompatibleProviders: {
+      getConfig: () => Promise.resolve({ isEnabled: true, isConfigured: false }),
+      addProvider: () => Promise.resolve({ success: true, data: undefined }),
+      updateProvider: () => Promise.resolve({ success: true, data: undefined }),
+      removeProvider: () => Promise.resolve({ success: true, data: undefined }),
+      setModels: () => Promise.resolve({ success: true, data: undefined }),
+    },
     onePassword: {
       isAvailable: () => Promise.resolve({ available: false }),
       listVaults: () => Promise.resolve([]),
diff --git a/src/common/config/schemas/openaiCompatibleProvider.ts b/src/common/config/schemas/openaiCompatibleProvider.ts
new file mode 100644
index 0000000000..f8762d2095
--- /dev/null
+++ b/src/common/config/schemas/openaiCompatibleProvider.ts
@@ -0,0 +1,39 @@
+import { z } from "zod";
+import { ProviderModelEntrySchema } from "./providerModelEntry";
+
+/**
+ * Schema for a single OpenAI-compatible provider instance.
+ * Each instance represents a separate API endpoint that uses the OpenAI-compatible API format.
+ * Examples: Together AI, Fireworks, LM Studio, Jan, custom inference servers.
+ */ +export const OpenAICompatibleProviderInstanceSchema = z.object({ + /** Unique identifier for this provider instance (used in model strings like "openai-compatible:my-provider:model-id") */ + id: z.string().min(1), + /** Display name shown in the UI */ + name: z.string().min(1), + /** API key for authentication (optional for local servers) */ + apiKey: z.string().optional(), + /** Human-readable label if apiKey is a 1Password reference */ + apiKeyOpLabel: z.string().optional(), + /** Base URL for the API endpoint (required) */ + baseUrl: z.string().url(), + /** Custom headers to send with each request */ + headers: z.record(z.string(), z.string()).optional(), + /** Models available from this provider */ + models: z.array(ProviderModelEntrySchema).optional(), + /** Whether this provider instance is enabled */ + enabled: z.boolean().optional(), +}); + +/** + * Schema for the openai-compatible provider configuration. + * Contains an array of provider instances, each with its own baseUrl, apiKey, and models. 
+ */
+export const OpenAICompatibleProvidersConfigSchema = z.object({
+  providers: z.array(OpenAICompatibleProviderInstanceSchema).optional(),
+});
+
+export type OpenAICompatibleProviderInstance = z.infer<
+  typeof OpenAICompatibleProviderInstanceSchema
+>;
+export type OpenAICompatibleProvidersConfig = z.infer<typeof OpenAICompatibleProvidersConfigSchema>;
diff --git a/src/common/config/schemas/providersConfig.ts b/src/common/config/schemas/providersConfig.ts
index 5c9368a203..608ff239b1 100644
--- a/src/common/config/schemas/providersConfig.ts
+++ b/src/common/config/schemas/providersConfig.ts
@@ -2,6 +2,8 @@ import { z } from "zod";
 import { ModelParametersByModelSchema } from "./modelParameters";
 import { ProviderModelEntrySchema } from "./providerModelEntry";
+import { OpenAICompatibleProvidersConfigSchema } from "./openaiCompatibleProvider";
+import type { OpenAICompatibleProviderInstanceSchema } from "./openaiCompatibleProvider";
 
 export const CacheTtlSchema = z.enum(["5m", "1h"]);
 export const ServiceTierSchema = z.enum(["auto", "default", "flex", "priority"]);
@@ -78,6 +80,7 @@
     deepseek: DeepSeekProviderConfigSchema.optional(),
     ollama: OllamaProviderConfigSchema.optional(),
     "github-copilot": GitHubCopilotProviderConfigSchema.optional(),
+    "openai-compatible": OpenAICompatibleProvidersConfigSchema.optional(),
   })
   .catchall(BaseProviderConfigSchema);
 
@@ -92,5 +95,9 @@ export type GoogleProviderConfig = z.infer<typeof GoogleProviderConfigSchema>;
 export type DeepSeekProviderConfig = z.infer<typeof DeepSeekProviderConfigSchema>;
 export type OllamaProviderConfig = z.infer<typeof OllamaProviderConfigSchema>;
 export type GitHubCopilotProviderConfig = z.infer<typeof GitHubCopilotProviderConfigSchema>;
+export type OpenAICompatibleProvidersConfig = z.infer<typeof OpenAICompatibleProvidersConfigSchema>;
+export type OpenAICompatibleProviderInstance = z.infer<
+  typeof OpenAICompatibleProviderInstanceSchema
+>;
 export type ProvidersConfig = z.infer<typeof ProvidersConfigSchema>;
diff --git a/src/common/orpc/schemas.ts b/src/common/orpc/schemas.ts
index d3bf846309..7df57c3dd4 100644
--- a/src/common/orpc/schemas.ts
+++ b/src/common/orpc/schemas.ts
@@ -221,6 +221,9 @@
   mcpOauth,
   mcp,
 
secrets, + OpenAICompatibleInstanceInfoSchema, + OpenAICompatibleProvidersInfoSchema, + isOpenAICompatibleProvidersInfo, ProviderConfigInfoSchema, ProviderModelEntrySchema, muxGateway, @@ -231,6 +234,7 @@ export { policy, providers, ProvidersConfigMapSchema, + openaiCompatibleProviders, server, ServerAuthSessionSchema, serverAuth, diff --git a/src/common/orpc/schemas/api.ts b/src/common/orpc/schemas/api.ts index 9ea0d5ee75..072608dd85 100644 --- a/src/common/orpc/schemas/api.ts +++ b/src/common/orpc/schemas/api.ts @@ -199,8 +199,71 @@ export const ProviderConfigInfoSchema = z.object({ gatewayModels: z.array(z.string()).optional(), }); +/** + * Frontend schema for an OpenAI-compatible provider instance. + * Represents a single provider instance (e.g., Together AI, Fireworks, LM Studio) + * that uses the OpenAI-compatible API format. + */ +export const OpenAICompatibleInstanceInfoSchema = z.object({ + /** Unique identifier for this provider instance */ + id: z.string(), + /** Display name shown in the UI */ + name: z.string(), + /** Base URL for the API endpoint */ + baseUrl: z.string(), + /** Whether an API key is set */ + apiKeySet: z.boolean(), + /** Whether the API key is a 1Password reference */ + apiKeyIsOpRef: z.boolean().optional(), + /** Human-readable label for 1Password reference */ + apiKeyOpLabel: z.string().optional(), + /** Whether this provider instance is enabled */ + isEnabled: z.boolean().default(true), + /** Whether this provider is configured and ready to use */ + isConfigured: z.boolean(), + /** Models available from this provider */ + models: z.array(ProviderModelEntrySchema).optional(), +}); + +/** + * Frontend schema for OpenAI-compatible providers. + * The "openai-compatible" key in ProvidersConfigMap contains this structure. 
+ */
+export const OpenAICompatibleProvidersInfoSchema = z.object({
+  /** Whether the OpenAI-compatible provider system is enabled */
+  isEnabled: z.boolean().default(true),
+  /** Whether at least one provider instance is configured */
+  isConfigured: z.boolean(),
+  /** List of configured provider instances */
+  providers: z.array(OpenAICompatibleInstanceInfoSchema).optional(),
+});
+
 export const ProvidersConfigMapSchema = z.record(z.string(), ProviderConfigInfoSchema);
 
+/**
+ * Type guard to check if a provider config is OpenAI-compatible providers info.
+ */
+export function isOpenAICompatibleProvidersInfo(
+  value: unknown
+): value is z.infer<typeof OpenAICompatibleProvidersInfoSchema> {
+  return (
+    typeof value === "object" &&
+    value !== null &&
+    "providers" in value &&
+    Array.isArray((value as { providers?: unknown }).providers)
+  );
+}
+
+/**
+ * Type guard to check if a provider config is a regular ProviderConfigInfo.
+ * Use this to filter out the special "openai-compatible" entry when iterating over providers.
+ */
+export function isRegularProviderConfigInfo(
+  value: unknown
+): value is z.infer<typeof ProviderConfigInfoSchema> {
+  return !isOpenAICompatibleProvidersInfo(value);
+}
+
 export const providers = {
   setProviderConfig: {
     input: z.object({
@@ -232,6 +295,48 @@
   },
 };
 
+// OpenAI-Compatible Provider Schemas
+export const openaiCompatibleProviders = {
+  getConfig: {
+    input: z.void(),
+    output: OpenAICompatibleProvidersInfoSchema,
+  },
+  addProvider: {
+    input: z.object({
+      id: z.string().min(1),
+      name: z.string().min(1),
+      baseUrl: z.string().url(),
+      apiKey: z.string().optional(),
+    }),
+    output: ResultSchema(z.void(), z.string()),
+  },
+  updateProvider: {
+    input: z.object({
+      instanceId: z.string(),
+      updates: z.object({
+        name: z.string().optional(),
+        baseUrl: z.string().optional(),
+        apiKey: z.string().optional(),
+        enabled: z.boolean().optional(),
+      }),
+    }),
+    output: ResultSchema(z.void(), z.string()),
+  },
+  removeProvider: {
+    input: z.object({
+      instanceId: z.string(),
+    }),
+    
output: ResultSchema(z.void(), z.string()), + }, + setModels: { + input: z.object({ + instanceId: z.string(), + models: z.array(ProviderModelEntrySchema), + }), + output: ResultSchema(z.void(), z.string()), + }, +}; + // Policy (admin-enforced config) export const policy = { get: { diff --git a/src/common/orpc/types.ts b/src/common/orpc/types.ts index b5d386ff57..2384ae772f 100644 --- a/src/common/orpc/types.ts +++ b/src/common/orpc/types.ts @@ -30,6 +30,12 @@ export type SendMessageOptions = z.infer; export type ProviderModelEntry = z.infer; export type ProviderConfigInfo = z.infer; +export type OpenAICompatibleInstanceInfo = z.infer< + typeof schemas.OpenAICompatibleInstanceInfoSchema +>; +export type OpenAICompatibleProvidersInfo = z.infer< + typeof schemas.OpenAICompatibleProvidersInfoSchema +>; export type ProvidersConfigMap = z.infer; export type FilePart = z.infer; export type WorkspaceChatMessage = z.infer; diff --git a/src/common/utils/providers/modelEntries.test.ts b/src/common/utils/providers/modelEntries.test.ts index 9bc383826e..72709e318f 100644 --- a/src/common/utils/providers/modelEntries.test.ts +++ b/src/common/utils/providers/modelEntries.test.ts @@ -44,6 +44,152 @@ describe("resolveModelForMetadata", () => { test("returns original model for unparseable ID", () => { expect(resolveModelForMetadata("bare-model", null)).toBe("bare-model"); }); + + test("returns original model for openai-compatible without config", () => { + expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", null)).toBe( + "openai-compatible:together-ai:llama-3-1-70b" + ); + }); + + test("returns original model for openai-compatible when not found", () => { + const config = { + "openai-compatible": { + isEnabled: true, + isConfigured: true, + providers: [ + { + id: "other-provider", + name: "Other Provider", + baseUrl: "https://other.example.com", + apiKeySet: false, + isEnabled: true, + isConfigured: true, + models: ["some-model"], + }, + ], + }, + } as 
unknown as ProvidersConfigMap; + + expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", config)).toBe( + "openai-compatible:together-ai:llama-3-1-70b" + ); + }); + + test("returns mapped model for openai-compatible when mapping exists", () => { + const config = { + "openai-compatible": { + isEnabled: true, + isConfigured: true, + providers: [ + { + id: "together-ai", + name: "Together AI", + baseUrl: "https://api.together.xyz", + apiKeySet: true, + isEnabled: true, + isConfigured: true, + models: [ + { + id: "llama-3-1-70b", + mappedToModel: "anthropic:claude-sonnet-4-6", + }, + ], + }, + ], + }, + } as unknown as ProvidersConfigMap; + + expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", config)).toBe( + "anthropic:claude-sonnet-4-6" + ); + }); +}); + +describe("getModelContextWindowOverride", () => { + test("returns null for openai-compatible without config", () => { + expect(getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", null)).toBe( + null + ); + }); + + test("returns null for openai-compatible when model not found", () => { + const config = { + "openai-compatible": { + isEnabled: true, + isConfigured: true, + providers: [ + { + id: "other-provider", + name: "Other Provider", + baseUrl: "https://other.example.com", + apiKeySet: false, + isEnabled: true, + isConfigured: true, + models: ["some-model"], + }, + ], + }, + } as unknown as ProvidersConfigMap; + + expect( + getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", config) + ).toBe(null); + }); + + test("returns context window for openai-compatible model", () => { + const config = { + "openai-compatible": { + isEnabled: true, + isConfigured: true, + providers: [ + { + id: "together-ai", + name: "Together AI", + baseUrl: "https://api.together.xyz", + apiKeySet: true, + isEnabled: true, + isConfigured: true, + models: [ + { + id: "llama-3-1-70b", + contextWindowTokens: 131072, + }, + ], + }, + ], + }, + } 
as unknown as ProvidersConfigMap; + + expect( + getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", config) + ).toBe(131072); + }); + + test("returns null for standard provider without configWindowTokens", () => { + const config: ProvidersConfigMap = { + ollama: { + apiKeySet: false, + isEnabled: true, + isConfigured: true, + models: ["llama3"], + }, + }; + + expect(getModelContextWindowOverride("ollama:llama3", config)).toBe(null); + }); + + test("returns context window for standard provider", () => { + const config: ProvidersConfigMap = { + ollama: { + apiKeySet: false, + isEnabled: true, + isConfigured: true, + models: [{ id: "llama3", contextWindowTokens: 8192 }], + }, + }; + + expect(getModelContextWindowOverride("ollama:llama3", config)).toBe(8192); + }); }); describe("gateway-scoped provider model entry lookup", () => { diff --git a/src/node/orpc/router.ts b/src/node/orpc/router.ts index 91da13b7be..8e0627c043 100644 --- a/src/node/orpc/router.ts +++ b/src/node/orpc/router.ts @@ -1332,6 +1332,36 @@ export const router = (authToken?: string) => { } }), }, + openaiCompatibleProviders: { + getConfig: t + .input(schemas.openaiCompatibleProviders.getConfig.input) + .output(schemas.openaiCompatibleProviders.getConfig.output) + .handler(({ context }) => context.providerService.getOpenAICompatibleProvidersInfo()), + addProvider: t + .input(schemas.openaiCompatibleProviders.addProvider.input) + .output(schemas.openaiCompatibleProviders.addProvider.output) + .handler(({ context, input }) => + context.providerService.addOpenAICompatibleProvider(input) + ), + updateProvider: t + .input(schemas.openaiCompatibleProviders.updateProvider.input) + .output(schemas.openaiCompatibleProviders.updateProvider.output) + .handler(({ context, input }) => + context.providerService.updateOpenAICompatibleProvider(input.instanceId, input.updates) + ), + removeProvider: t + .input(schemas.openaiCompatibleProviders.removeProvider.input) + 
.output(schemas.openaiCompatibleProviders.removeProvider.output) + .handler(({ context, input }) => + context.providerService.removeOpenAICompatibleProvider(input.instanceId) + ), + setModels: t + .input(schemas.openaiCompatibleProviders.setModels.input) + .output(schemas.openaiCompatibleProviders.setModels.output) + .handler(({ context, input }) => + context.providerService.setOpenAICompatibleProviderModels(input.instanceId, input.models) + ), + }, policy: { get: t .input(schemas.policy.get.input) diff --git a/src/node/services/providerModelFactory.ts b/src/node/services/providerModelFactory.ts index acd1d9961f..9f013a5610 100644 --- a/src/node/services/providerModelFactory.ts +++ b/src/node/services/providerModelFactory.ts @@ -20,6 +20,7 @@ import { parseCodexOauthAuth } from "@/node/utils/codexOauthAuth"; import type { Config, ProviderConfig, ProvidersConfig } from "@/node/config"; import type { MuxProviderOptions } from "@/common/types/providerOptions"; import type { ExternalSecretResolver } from "@/common/types/secrets"; +import type { OpenAICompatibleProviderInstance } from "@/common/config/schemas/openaiCompatibleProvider"; import { isOpReference } from "@/common/utils/opRef"; import { isProviderDisabledInConfig } from "@/common/utils/providers/isProviderDisabled"; import type { PolicyService } from "@/node/services/policyService"; @@ -673,7 +674,10 @@ export class ProviderModelFactory { // Check if provider is supported (prevents silent failures when adding to PROVIDER_REGISTRY // but forgetting to implement handler below) - if (!(providerName in PROVIDER_REGISTRY)) { + // Note: "openai-compatible" is a special multi-instance provider handled separately + const isSupportedProvider = + providerName in PROVIDER_REGISTRY || providerName === "openai-compatible"; + if (!isSupportedProvider) { return Err({ type: "provider_not_supported", provider: providerName, @@ -1537,6 +1541,75 @@ export class ProviderModelFactory { return Ok(provider.chatModel(modelId)); } + 
// Handle OpenAI-compatible providers (dynamic provider instances)
+    // Model string format: "openai-compatible:<instanceId>:<modelId>"
+    if (providerName === "openai-compatible") {
+      // Parse instance ID from model string
+      const colonIndex = modelId.indexOf(":");
+      if (colonIndex === -1) {
+        return Err({
+          type: "invalid_model_string",
+          message: `Invalid openai-compatible model string. Expected "openai-compatible:<instanceId>:<modelId>"`,
+        });
+      }
+      const instanceId = modelId.slice(0, colonIndex);
+      const actualModelId = modelId.slice(colonIndex + 1);
+
+      // Load the openai-compatible provider config
+      const openaiCompatibleConfig = providersConfig["openai-compatible"] as
+        | { providers?: OpenAICompatibleProviderInstance[] }
+        | undefined;
+
+      const instances = openaiCompatibleConfig?.providers ?? [];
+      const instance = instances.find((p) => p.id === instanceId);
+
+      if (!instance) {
+        return Err({
+          type: "provider_not_supported",
+          provider: `openai-compatible:${instanceId}`,
+        });
+      }
+
+      // Check if instance is enabled
+      if (instance.enabled === false) {
+        return Err({
+          type: "provider_disabled",
+          provider: `openai-compatible:${instanceId}`,
+        });
+      }
+
+      // Resolve API key (may be a 1Password reference)
+      const resolvedApiKey = await this.resolveApiKey(instance.apiKey);
+      if (instance.apiKey && isOpReference(instance.apiKey) && !resolvedApiKey) {
+        return Err({
+          type: "api_key_not_found",
+          provider: `openai-compatible:${instanceId}`,
+        });
+      }
+
+      // Require either an API key or a baseUrl (local servers may not need an API key)
+      if (!resolvedApiKey && !instance.baseUrl) {
+        return Err({
+          type: "api_key_not_found",
+          provider: `openai-compatible:${instanceId}`,
+        });
+      }
+
+      const baseFetch = getProviderFetch({});
+      // eslint-disable-next-line no-restricted-syntax -- Dynamic import needed for multi-instance openai-compatible providers
+      const { createOpenAICompatible } = await import("@ai-sdk/openai-compatible");
+
+      const provider = createOpenAICompatible({
+        name: instanceId,
+        baseURL: 
instance.baseUrl, + apiKey: resolvedApiKey ?? "no-key", + headers: instance.headers, + fetch: baseFetch, + }); + + return Ok(provider.chatModel(actualModelId)); + } + // Generic handler for simple providers (standard API key + factory pattern) // Providers with custom logic (anthropic, openai, xai, ollama, openrouter, bedrock, mux-gateway, // github-copilot) are handled explicitly above. New providers using the standard pattern need diff --git a/src/node/services/providerService.ts b/src/node/services/providerService.ts index a8b51c74a2..25b2a2c6c1 100644 --- a/src/node/services/providerService.ts +++ b/src/node/services/providerService.ts @@ -8,10 +8,16 @@ import { import type { Result } from "@/common/types/result"; import type { AWSCredentialStatus, + OpenAICompatibleInstanceInfo, + OpenAICompatibleProvidersInfo, ProviderConfigInfo, ProviderModelEntry, ProvidersConfigMap, } from "@/common/orpc/types"; +import type { + OpenAICompatibleProviderInstance, + OpenAICompatibleProvidersConfig, +} from "@/common/config/schemas/openaiCompatibleProvider"; import { isProviderDisabledInConfig } from "@/common/utils/providers/isProviderDisabled"; import { isOpReference } from "@/common/utils/opRef"; import { @@ -93,6 +99,24 @@ export class ProviderService { } } + /** + * List all OpenAI-compatible provider instance IDs. + * Returns the IDs of configured provider instances. + */ + public listOpenAICompatibleProviders(): string[] { + try { + const providersConfig = this.config.loadProvidersConfig() ?? {}; + const openaiCompatibleConfig = providersConfig["openai-compatible"] as + | OpenAICompatibleProvidersConfig + | undefined; + + return (openaiCompatibleConfig?.providers ?? 
[]).map((p) => p.id);
+    } catch (error) {
+      log.error("Failed to list OpenAI-compatible providers:", error);
+      return [];
+    }
+  }
+
   /**
    * Get the full providers config with safe info (no actual API keys)
    */
@@ -503,4 +527,202 @@
       return { success: false, error: `Failed to set provider config: ${message}` };
     }
   }
+
+  // ---------------------------------------------------------------------------
+  // OpenAI-Compatible Provider Methods
+  // ---------------------------------------------------------------------------
+
+  /**
+   * Get OpenAI-compatible providers configuration for the frontend.
+   * Returns safe info (no actual API keys).
+   */
+  public getOpenAICompatibleProvidersInfo(): OpenAICompatibleProvidersInfo {
+    const providersConfig = this.config.loadProvidersConfig() ?? {};
+    const openaiCompatibleConfig = providersConfig["openai-compatible"] as
+      | OpenAICompatibleProvidersConfig
+      | undefined;
+
+    const providers = openaiCompatibleConfig?.providers ?? [];
+    const instances: OpenAICompatibleInstanceInfo[] = providers.map((provider) => {
+      const apiKeyIsOpRef = isOpReference(provider.apiKey);
+      const isEnabled = provider.enabled !== false;
+      const isConfigured = !!provider.baseUrl;
+
+      return {
+        id: provider.id,
+        name: provider.name,
+        baseUrl: provider.baseUrl,
+        apiKeySet: !!provider.apiKey,
+        apiKeyIsOpRef: apiKeyIsOpRef || undefined,
+        apiKeyOpLabel: apiKeyIsOpRef ? provider.apiKeyOpLabel : undefined,
+        isEnabled,
+        isConfigured,
+        models: provider.models ? normalizeProviderModelEntries(provider.models) : undefined,
+      };
+    });
+
+    return {
+      isEnabled: true,
+      isConfigured: instances.some((i) => i.isConfigured),
+      providers: instances.length > 0 ? instances : undefined,
+    };
+  }
+
+  /**
+   * Add a new OpenAI-compatible provider instance.
+   */
+  public addOpenAICompatibleProvider(
+    instance: Omit<OpenAICompatibleProviderInstance, "models">
+  ): Result<void, string> {
+    try {
+      const providersConfig = this.config.loadProvidersConfig() ?? 
{};
+
+      const openaiCompatibleConfig = (providersConfig["openai-compatible"] ??
+        {}) as OpenAICompatibleProvidersConfig;
+      const existingProviders = openaiCompatibleConfig.providers ?? [];
+
+      if (existingProviders.some((p) => p.id === instance.id)) {
+        return {
+          success: false,
+          error: `Provider instance with id "${instance.id}" already exists`,
+        };
+      }
+
+      const newProvider: OpenAICompatibleProviderInstance = {
+        ...instance,
+        models: [],
+      };
+
+      providersConfig["openai-compatible"] = {
+        ...openaiCompatibleConfig,
+        providers: [...existingProviders, newProvider],
+      };
+
+      this.config.saveProvidersConfig(providersConfig);
+      this.notifyConfigChanged();
+
+      return { success: true, data: undefined };
+    } catch (error) {
+      const message = getErrorMessage(error);
+      return { success: false, error: `Failed to add provider: ${message}` };
+    }
+  }
+
+  /**
+   * Update an existing OpenAI-compatible provider instance.
+   */
+  public updateOpenAICompatibleProvider(
+    instanceId: string,
+    updates: Partial<Omit<OpenAICompatibleProviderInstance, "id">>
+  ): Result<void, string> {
+    try {
+      const providersConfig = this.config.loadProvidersConfig() ?? {};
+
+      const openaiCompatibleConfig = (providersConfig["openai-compatible"] ??
+        {}) as OpenAICompatibleProvidersConfig;
+      const existingProviders = openaiCompatibleConfig.providers ?? 
[];
+
+      const index = existingProviders.findIndex((p) => p.id === instanceId);
+      if (index === -1) {
+        return { success: false, error: `Provider instance "${instanceId}" not found` };
+      }
+
+      const updatedProvider: OpenAICompatibleProviderInstance = {
+        ...existingProviders[index],
+        ...updates,
+        id: instanceId,
+      };
+
+      const updatedProviders = [...existingProviders];
+      updatedProviders[index] = updatedProvider;
+
+      providersConfig["openai-compatible"] = {
+        ...openaiCompatibleConfig,
+        providers: updatedProviders,
+      };
+
+      this.config.saveProvidersConfig(providersConfig);
+      this.notifyConfigChanged();
+
+      return { success: true, data: undefined };
+    } catch (error) {
+      const message = getErrorMessage(error);
+      return { success: false, error: `Failed to update provider: ${message}` };
+    }
+  }
+
+  /**
+   * Remove an OpenAI-compatible provider instance.
+   */
+  public removeOpenAICompatibleProvider(instanceId: string): Result<void, string> {
+    try {
+      const providersConfig = this.config.loadProvidersConfig() ?? {};
+
+      const openaiCompatibleConfig = (providersConfig["openai-compatible"] ??
+        {}) as OpenAICompatibleProvidersConfig;
+      const existingProviders = openaiCompatibleConfig.providers ?? [];
+
+      const filtered = existingProviders.filter((p) => p.id !== instanceId);
+      if (filtered.length === existingProviders.length) {
+        return { success: false, error: `Provider instance "${instanceId}" not found` };
+      }
+
+      providersConfig["openai-compatible"] = {
+        ...openaiCompatibleConfig,
+        providers: filtered.length > 0 ? filtered : undefined,
+      };
+
+      this.config.saveProvidersConfig(providersConfig);
+      this.notifyConfigChanged();
+
+      return { success: true, data: undefined };
+    } catch (error) {
+      const message = getErrorMessage(error);
+      return { success: false, error: `Failed to remove provider: ${message}` };
+    }
+  }
+
+  /**
+   * Set models for a specific OpenAI-compatible provider instance.
+   */
+  public setOpenAICompatibleProviderModels(
+    instanceId: string,
+    models: ProviderModelEntry[]
+  ): Result<void, string> {
+    try {
+      const normalizedModels = normalizeProviderModelEntries(models);
+
+      const providersConfig = this.config.loadProvidersConfig() ?? {};
+
+      const openaiCompatibleConfig = (providersConfig["openai-compatible"] ??
+        {}) as OpenAICompatibleProvidersConfig;
+      const existingProviders = openaiCompatibleConfig.providers ?? [];
+
+      const index = existingProviders.findIndex((p) => p.id === instanceId);
+      if (index === -1) {
+        return { success: false, error: `Provider instance "${instanceId}" not found` };
+      }
+
+      const updatedProvider: OpenAICompatibleProviderInstance = {
+        ...existingProviders[index],
+        models: normalizedModels,
+      };
+
+      const updatedProviders = [...existingProviders];
+      updatedProviders[index] = updatedProvider;
+
+      providersConfig["openai-compatible"] = {
+        ...openaiCompatibleConfig,
+        providers: updatedProviders,
+      };
+
+      this.config.saveProvidersConfig(providersConfig);
+      this.notifyConfigChanged();
+
+      return { success: true, data: undefined };
+    } catch (error) {
+      const message = getErrorMessage(error);
+      return { success: false, error: `Failed to set models: ${message}` };
+    }
+  }
 }

From b127690c3e7f9d26d8c480fdecc720aa9e00e920 Mon Sep 17 00:00:00 2001
From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com>
Date: Sun, 8 Mar 2026 11:23:46 -0300
Subject: [PATCH 2/7] refactor: flatten openai compatible providers

---
 src/browser/features/ChatInput/index.tsx      |  25 +--
 .../Settings/Sections/ModelsSection.tsx       | 118 +++-----------
 .../Settings/Sections/ProvidersSection.tsx    |  17 +-
 .../Settings/Sections/System1Section.tsx      |   3 +-
 .../hooks/useModelsFromSettings.test.ts       |   4 +-
 src/browser/hooks/useModelsFromSettings.ts    |  59 ++-----
 .../utils/providers/modelEntries.test.ts      | 152 ++++++++++--------
 src/common/utils/providers/modelEntries.ts    |   6 -
 src/node/services/providerModelFactory.ts     |  33 ++--
 
src/node/services/providerService.ts | 73 +++++++++ 10 files changed, 222 insertions(+), 268 deletions(-) diff --git a/src/browser/features/ChatInput/index.tsx b/src/browser/features/ChatInput/index.tsx index 41d8fd3c52..1139ffff52 100644 --- a/src/browser/features/ChatInput/index.tsx +++ b/src/browser/features/ChatInput/index.tsx @@ -28,7 +28,6 @@ import { } from "@/browser/utils/policyUi"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { useAPI } from "@/browser/contexts/API"; -import { isRegularProviderConfigInfo } from "@/common/orpc/schemas/api"; import { useThinkingLevel } from "@/browser/hooks/useThinkingLevel"; import { normalizeSelectedModel } from "@/common/utils/ai/models"; import { useSendMessageOptions } from "@/browser/hooks/useSendMessageOptions"; @@ -1403,26 +1402,10 @@ const ChatInputInner: React.FC = (props) => { if (!signal.aborted) { const openaiConfig = config?.openai; const muxGatewayConfig = config?.["mux-gateway"]; - setOpenAIKeySet( - openaiConfig && isRegularProviderConfigInfo(openaiConfig) - ? openaiConfig.apiKeySet - : false - ); - setOpenAIProviderEnabled( - openaiConfig && isRegularProviderConfigInfo(openaiConfig) - ? openaiConfig.isEnabled - : true - ); - setMuxGatewayCouponSet( - muxGatewayConfig && isRegularProviderConfigInfo(muxGatewayConfig) - ? (muxGatewayConfig.couponCodeSet ?? false) - : false - ); - setMuxGatewayEnabled( - muxGatewayConfig && isRegularProviderConfigInfo(muxGatewayConfig) - ? muxGatewayConfig.isEnabled - : true - ); + setOpenAIKeySet(openaiConfig?.apiKeySet ?? false); + setOpenAIProviderEnabled(openaiConfig?.isEnabled ?? true); + setMuxGatewayCouponSet(muxGatewayConfig?.couponCodeSet ?? false); + setMuxGatewayEnabled(muxGatewayConfig?.isEnabled ?? 
true); } } catch { // Ignore errors fetching config diff --git a/src/browser/features/Settings/Sections/ModelsSection.tsx b/src/browser/features/Settings/Sections/ModelsSection.tsx index c4cacb5818..59ccf08cc9 100644 --- a/src/browser/features/Settings/Sections/ModelsSection.tsx +++ b/src/browser/features/Settings/Sections/ModelsSection.tsx @@ -16,7 +16,6 @@ import { useModelsFromSettings } from "@/browser/hooks/useModelsFromSettings"; import { useRouting } from "@/browser/hooks/useRouting"; import { usePersistedState } from "@/browser/hooks/usePersistedState"; import { useProvidersConfig } from "@/browser/hooks/useProvidersConfig"; -import { useOpenAICompatibleProviders } from "@/browser/hooks/useOpenAICompatibleProviders"; import { KNOWN_MODELS } from "@/common/constants/knownModels"; import { isCodexOauthRequiredModelId } from "@/common/constants/codexOAuth"; import { usePolicy } from "@/browser/contexts/PolicyContext"; @@ -28,7 +27,6 @@ import { import { getAllowedProvidersForUi, isModelAllowedByPolicy } from "@/browser/utils/policyUi"; import { LAST_CUSTOM_MODEL_PROVIDER_KEY } from "@/common/constants/storage"; import type { ProviderModelEntry } from "@/common/orpc/types"; -import { isRegularProviderConfigInfo } from "@/common/orpc/schemas/api"; import { getProviderModelEntryContextWindowTokens, getProviderModelEntryId, @@ -132,7 +130,6 @@ export function ModelsSection() { const { api } = useAPI(); const { open: openSettings } = useSettings(); const { config, loading, updateModelsOptimistically } = useProvidersConfig(); - const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); const [lastProvider, setLastProvider] = usePersistedState(LAST_CUSTOM_MODEL_PROVIDER_KEY, ""); const [newModelId, setNewModelId] = useState(""); const [editing, setEditing] = useState(null); @@ -149,10 +146,7 @@ export function ModelsSection() { // Read OAuth state from this component's provider config source to avoid // cross-hook timing mismatches while settings are 
loading/refetching. const openaiConfig = config?.openai; - const codexOauthConfigured = - openaiConfig && isRegularProviderConfigInfo(openaiConfig) - ? openaiConfig.codexOauthSet === true - : false; + const codexOauthConfigured = openaiConfig?.codexOauthSet === true; // "Treat as" dropdown should only list known models — custom models don't have // the metadata (pricing, context window, tokenizer) that mapping inherits. @@ -166,10 +160,7 @@ export function ModelsSection() { (provider: string, modelId: string, excludeOriginal?: string): boolean => { if (!config) return false; const providerConfig = config[provider]; - const currentModels = - providerConfig && isRegularProviderConfigInfo(providerConfig) - ? (providerConfig.models ?? []) - : []; + const currentModels = providerConfig?.models ?? []; return currentModels.some((entry: ProviderModelEntry) => { const currentModelId = getProviderModelEntryId(entry); return currentModelId === modelId && currentModelId !== excludeOriginal; @@ -305,67 +296,33 @@ export function ModelsSection() { mappedTo ); - const isOpenAICompatible = editing.provider === "openai-compatible" && editing.instanceId; + // With flattened structure, openai-compatible/{instanceId} is just a regular provider + const updatedModels = updateModelsOptimistically(editing.provider, (models) => { + const nextModels: ProviderModelEntry[] = []; + let replaced = false; - // Optimistic update - if (isOpenAICompatible) { - // For openai-compatible, we need to update via the provider's models array - const providerConfig = openaiCompatibleConfig?.providers?.find( - (p) => p.id === editing.instanceId - ); - if (providerConfig) { - const currentModels = providerConfig.models ?? 
[]; - const nextModels: ProviderModelEntry[] = []; - let replaced = false; - - for (const modelEntry of currentModels) { - if (!replaced && getProviderModelEntryId(modelEntry) === editing.originalModelId) { - nextModels.push(replacementEntry); - replaced = true; - continue; - } - nextModels.push(modelEntry); - } - - if (!replaced) { + for (const modelEntry of models) { + if (!replaced && getProviderModelEntryId(modelEntry) === editing.originalModelId) { nextModels.push(replacementEntry); + replaced = true; + continue; } - // Save in background - void api.openaiCompatibleProviders.setModels({ - instanceId: editing.instanceId!, - models: nextModels, - }); + nextModels.push(modelEntry); } - } else { - // Regular provider - const updatedModels = updateModelsOptimistically(editing.provider, (models) => { - const nextModels: ProviderModelEntry[] = []; - let replaced = false; - - for (const modelEntry of models) { - if (!replaced && getProviderModelEntryId(modelEntry) === editing.originalModelId) { - nextModels.push(replacementEntry); - replaced = true; - continue; - } - - nextModels.push(modelEntry); - } - if (!replaced) { - nextModels.push(replacementEntry); - } + if (!replaced) { + nextModels.push(replacementEntry); + } - return nextModels; - }); + return nextModels; + }); - // Save in background - void api.providers.setModels({ provider: editing.provider, models: updatedModels }); - } + // Save in background + void api.providers.setModels({ provider: editing.provider, models: updatedModels }); setEditing(null); - }, [api, editing, config, modelExists, updateModelsOptimistically, openaiCompatibleConfig]); + }, [api, editing, config, modelExists, updateModelsOptimistically]); // Show loading state while config is being fetched if (loading || !config) { @@ -378,28 +335,19 @@ export function ModelsSection() { } // Get all custom models across providers (excluding hidden providers like mux-gateway) - const getCustomModels = (): Array<{ - provider: string; - modelId: 
string; - fullId: string; - contextWindowTokens: number | null; - mappedToModel: string | null; - instanceId?: string; - }> => { + const getCustomModels = () => { const models: Array<{ provider: string; modelId: string; fullId: string; contextWindowTokens: number | null; mappedToModel: string | null; - instanceId?: string; }> = []; for (const [provider, providerConfig] of Object.entries(config)) { // Skip hidden providers (mux-gateway models are routed, not managed as a standalone list) if (HIDDEN_PROVIDERS.has(provider)) continue; - if (!isRegularProviderConfigInfo(providerConfig)) continue; - if (!providerConfig.models) continue; + if (!providerConfig?.models) continue; for (const modelEntry of providerConfig.models) { const modelId = getProviderModelEntryId(modelEntry); @@ -413,24 +361,6 @@ export function ModelsSection() { } } - // Add OpenAI-compatible provider models - if (openaiCompatibleConfig?.providers) { - for (const provider of openaiCompatibleConfig.providers) { - if (!provider.models) continue; - for (const modelEntry of provider.models) { - const modelId = getProviderModelEntryId(modelEntry); - models.push({ - provider: "openai-compatible", - modelId, - fullId: `openai-compatible:${provider.id}:${modelId}`, - contextWindowTokens: getProviderModelEntryContextWindowTokens(modelEntry), - mappedToModel: getProviderModelEntryMappedTo(modelEntry), - instanceId: provider.id, - }); - } - } - } - return models; }; @@ -542,8 +472,7 @@ export function ModelsSection() { model.provider, model.modelId, model.contextWindowTokens, - model.mappedToModel, - model.instanceId + model.mappedToModel ) } onStartContextEdit={() => @@ -551,8 +480,7 @@ export function ModelsSection() { model.provider, model.modelId, model.contextWindowTokens, - model.mappedToModel, - model.instanceId + model.mappedToModel ) } onSaveEdit={handleSaveEdit} diff --git a/src/browser/features/Settings/Sections/ProvidersSection.tsx b/src/browser/features/Settings/Sections/ProvidersSection.tsx index 
f02da05d9e..21aa40eec7 100644 --- a/src/browser/features/Settings/Sections/ProvidersSection.tsx +++ b/src/browser/features/Settings/Sections/ProvidersSection.tsx @@ -32,7 +32,6 @@ import { createEditKeyHandler } from "@/browser/utils/ui/keybinds"; import { getBrowserBackendBaseUrl } from "@/browser/utils/backendBaseUrl"; import { PROVIDER_DEFINITIONS, type ProviderName } from "@/common/constants/providers"; import type { ProvidersConfigMap } from "@/common/orpc/types"; -import { isRegularProviderConfigInfo } from "@/common/orpc/schemas/api"; import { OpenAICompatibleProvidersSection } from "./OpenAICompatibleProvidersSection"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { getAllowedProvidersForUi } from "@/browser/utils/policyUi"; @@ -384,20 +383,10 @@ export function ProvidersSection() { const [codexOauthAuthorizeUrl, setCodexOauthAuthorizeUrl] = useState(null); const openaiConfig = config?.openai; - const codexOauthIsConnected = - openaiConfig && isRegularProviderConfigInfo(openaiConfig) - ? openaiConfig.codexOauthSet === true - : false; - const openaiApiKeySet = - openaiConfig && isRegularProviderConfigInfo(openaiConfig) - ? openaiConfig.apiKeySet === true - : false; + const codexOauthIsConnected = openaiConfig?.codexOauthSet === true; + const openaiApiKeySet = openaiConfig?.apiKeySet === true; const codexOauthDefaultAuth = - openaiConfig && isRegularProviderConfigInfo(openaiConfig) - ? openaiConfig.codexOauthDefaultAuth === "apiKey" - ? "apiKey" - : "oauth" - : "oauth"; + openaiConfig?.codexOauthDefaultAuth === "apiKey" ? 
"apiKey" : "oauth"; const codexOauthDefaultAuthIsEditable = codexOauthIsConnected && openaiApiKeySet; const codexOauthLoginInProgress = diff --git a/src/browser/features/Settings/Sections/System1Section.tsx b/src/browser/features/Settings/Sections/System1Section.tsx index 3485344c17..22a0bfa7d3 100644 --- a/src/browser/features/Settings/Sections/System1Section.tsx +++ b/src/browser/features/Settings/Sections/System1Section.tsx @@ -40,7 +40,6 @@ import { getErrorMessage } from "@/common/utils/errors"; export function System1Section() { const { api } = useAPI(); const { config: providersConfig, loading: providersLoading } = useProvidersConfig(); - const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); const [taskSettings, setTaskSettings] = useState(DEFAULT_TASK_SETTINGS); const [loaded, setLoaded] = useState(false); @@ -289,7 +288,7 @@ export function System1Section() { ); } - const allModels = getSuggestedModels(providersConfig, openaiCompatibleConfig ?? null); + const allModels = getSuggestedModels(providersConfig); const bashOutputCompactionMinLines = taskSettings.bashOutputCompactionMinLines ?? 
diff --git a/src/browser/hooks/useModelsFromSettings.test.ts b/src/browser/hooks/useModelsFromSettings.test.ts index 3281c688d2..37c5c8dc42 100644 --- a/src/browser/hooks/useModelsFromSettings.test.ts +++ b/src/browser/hooks/useModelsFromSettings.test.ts @@ -105,7 +105,7 @@ describe("getSuggestedModels", () => { }, }; - const suggested = getSuggestedModels(config, null); + const suggested = getSuggestedModels(config); // Custom models are listed first (in config order) expect(suggested[0]).toBe("openai:my-team-model"); @@ -134,7 +134,7 @@ describe("getSuggestedModels", () => { }, }; - const suggested = getSuggestedModels(config, null); + const suggested = getSuggestedModels(config); expect(suggested).toContain("anthropic:enabled-custom"); expect(suggested).not.toContain("openai:disabled-custom"); diff --git a/src/browser/hooks/useModelsFromSettings.ts b/src/browser/hooks/useModelsFromSettings.ts index 20a8f0fe96..11eb803a9b 100644 --- a/src/browser/hooks/useModelsFromSettings.ts +++ b/src/browser/hooks/useModelsFromSettings.ts @@ -20,7 +20,6 @@ import { } from "@/common/utils/ai/models"; import { isModelAvailable } from "@/common/routing"; import type { ProviderModelEntry, ProvidersConfigMap } from "@/common/orpc/types"; -import { getModelProvider } from "@/common/utils/ai/models"; import type { OpenAICompatibleProvidersInfo, } from "@/common/orpc/types"; @@ -31,39 +30,19 @@ import { getProviderModelEntryId } from "@/common/utils/providers/modelEntries"; const BUILT_IN_MODELS: string[] = Object.values(KNOWN_MODELS).map((m) => m.id); const BUILT_IN_MODEL_SET = new Set(BUILT_IN_MODELS); -function getCustomModels( - config: ProvidersConfigMap | null, - openaiCompatibleConfig: OpenAICompatibleProvidersInfo | null -): string[] { - if (!config && !openaiCompatibleConfig) return []; +function getCustomModels(config: ProvidersConfigMap | null): string[] { + if (!config) return []; const models: string[] = []; - // Get models from regular providers - if (config) { - for 
(const [provider, info] of Object.entries(config)) { - // Skip mux-gateway - those models are accessed via the cloud toggle, not listed separately - if (provider === "mux-gateway") continue; - // Skip openai-compatible - handled separately - if (provider === "openai-compatible") continue; - // Only surface custom models from enabled providers - if (!info.isEnabled) continue; - if (!info.models) continue; - for (const modelEntry of info.models) { - const modelId = getProviderModelEntryId(modelEntry); - models.push(`${provider}:${modelId}`); - } - } - } - - // Get models from OpenAI-compatible providers - if (openaiCompatibleConfig?.providers) { - for (const provider of openaiCompatibleConfig.providers) { - if (!provider.isEnabled) continue; - if (!provider.models) continue; - for (const modelEntry of provider.models) { - const modelId = getProviderModelEntryId(modelEntry); - models.push(`openai-compatible:${provider.id}:${modelId}`); - } + for (const [provider, info] of Object.entries(config)) { + // Skip mux-gateway - those models are accessed via the cloud toggle, not listed separately + if (provider === "mux-gateway") continue; + // Only surface custom models from enabled providers + if (!info.isEnabled) continue; + if (!info.models) continue; + for (const modelEntry of info.models) { + const modelId = getProviderModelEntryId(modelEntry); + models.push(`${provider}:${modelId}`); } } @@ -107,11 +86,8 @@ function dedupeKeepFirst(models: string[]): string[] { return out; } -export function getSuggestedModels( - config: ProvidersConfigMap | null, - openaiCompatibleConfig: OpenAICompatibleProvidersInfo | null -): string[] { - const customModels = getCustomModels(config, openaiCompatibleConfig); +export function getSuggestedModels(config: ProvidersConfigMap | null): string[] { + const customModels = getCustomModels(config); return dedupeKeepFirst([...customModels, ...BUILT_IN_MODELS]); } @@ -187,9 +163,9 @@ export function useModelsFromSettings() { ); const 
customModels = useMemo(() => { - const next = filterHiddenModels(getCustomModels(config, openaiCompatibleConfig), hiddenModels); + const next = filterHiddenModels(getCustomModels(config), hiddenModels); return effectivePolicy ? next.filter((m) => isModelAllowedByPolicy(effectivePolicy, m)) : next; - }, [config, openaiCompatibleConfig, hiddenModels, effectivePolicy]); + }, [config, hiddenModels, effectivePolicy]); const openaiApiKeySet = config === null ? null : config.openai?.apiKeySet === true; const codexOauthSet = config === null ? null : config.openai?.codexOauthSet === true; @@ -248,10 +224,7 @@ export function useModelsFromSettings() { ); const models = useMemo(() => { - const suggested = filterHiddenModels( - getSuggestedModels(config, openaiCompatibleConfig), - hiddenModels - ); + const suggested = filterHiddenModels(getSuggestedModels(config), hiddenModels); // Hide models that are unavailable from both direct and gateway routes. // Keep all models visible while provider config is still loading to avoid UI flicker. 
diff --git a/src/common/utils/providers/modelEntries.test.ts b/src/common/utils/providers/modelEntries.test.ts index 72709e318f..e1975bb143 100644 --- a/src/common/utils/providers/modelEntries.test.ts +++ b/src/common/utils/providers/modelEntries.test.ts @@ -45,60 +45,74 @@ describe("resolveModelForMetadata", () => { expect(resolveModelForMetadata("bare-model", null)).toBe("bare-model"); }); - test("returns original model for openai-compatible without config", () => { - expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", null)).toBe( - "openai-compatible:together-ai:llama-3-1-70b" + // New format tests: openai-compatible/{instanceId}:{modelId} + test("returns original model for openai-compatible new format without config", () => { + expect(resolveModelForMetadata("openai-compatible/together-ai:llama-3-1-70b", null)).toBe( + "openai-compatible/together-ai:llama-3-1-70b" + ); + }); + + test("returns original model for openai-compatible new format when not found", () => { + const config: ProvidersConfigMap = { + "openai-compatible/other-provider": { + apiKeySet: false, + isEnabled: true, + isConfigured: true, + baseUrl: "https://other.example.com", + models: ["some-model"], + }, + }; + + expect(resolveModelForMetadata("openai-compatible/together-ai:llama-3-1-70b", config)).toBe( + "openai-compatible/together-ai:llama-3-1-70b" ); }); - test("returns original model for openai-compatible when not found", () => { - const config = { - "openai-compatible": { + test("returns mapped model for openai-compatible new format when mapping exists", () => { + const config: ProvidersConfigMap = { + "openai-compatible/together-ai": { + apiKeySet: true, isEnabled: true, isConfigured: true, - providers: [ + baseUrl: "https://api.together.xyz", + models: [ { - id: "other-provider", - name: "Other Provider", - baseUrl: "https://other.example.com", - apiKeySet: false, - isEnabled: true, - isConfigured: true, - models: ["some-model"], + id: "llama-3-1-70b", + 
mappedToModel: "anthropic:claude-sonnet-4-6", }, ], }, - } as unknown as ProvidersConfigMap; + }; - expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", config)).toBe( + expect(resolveModelForMetadata("openai-compatible/together-ai:llama-3-1-70b", config)).toBe( + "anthropic:claude-sonnet-4-6" + ); + }); + + // Old format tests (dual-support for existing data): openai-compatible:{instanceId}:{modelId} + test("returns original model for openai-compatible old format without config", () => { + expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", null)).toBe( "openai-compatible:together-ai:llama-3-1-70b" ); }); - test("returns mapped model for openai-compatible when mapping exists", () => { - const config = { - "openai-compatible": { + test("returns mapped model for openai-compatible old format when mapping exists", () => { + const config: ProvidersConfigMap = { + "openai-compatible/together-ai": { + apiKeySet: true, isEnabled: true, isConfigured: true, - providers: [ + baseUrl: "https://api.together.xyz", + models: [ { - id: "together-ai", - name: "Together AI", - baseUrl: "https://api.together.xyz", - apiKeySet: true, - isEnabled: true, - isConfigured: true, - models: [ - { - id: "llama-3-1-70b", - mappedToModel: "anthropic:claude-sonnet-4-6", - }, - ], + id: "llama-3-1-70b", + mappedToModel: "anthropic:claude-sonnet-4-6", }, ], }, - } as unknown as ProvidersConfigMap; + }; + // Old format should still resolve using flattened config expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", config)).toBe( "anthropic:claude-sonnet-4-6" ); @@ -106,59 +120,65 @@ describe("resolveModelForMetadata", () => { }); describe("getModelContextWindowOverride", () => { - test("returns null for openai-compatible without config", () => { - expect(getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", null)).toBe( + test("returns null for openai-compatible new format without config", () => { + 
expect(getModelContextWindowOverride("openai-compatible/together-ai:llama-3-1-70b", null)).toBe( null ); }); - test("returns null for openai-compatible when model not found", () => { - const config = { - "openai-compatible": { + test("returns null for openai-compatible new format when model not found", () => { + const config: ProvidersConfigMap = { + "openai-compatible/other-provider": { + apiKeySet: false, + isEnabled: true, + isConfigured: true, + baseUrl: "https://other.example.com", + models: ["some-model"], + }, + }; + + expect( + getModelContextWindowOverride("openai-compatible/together-ai:llama-3-1-70b", config) + ).toBe(null); + }); + + test("returns context window for openai-compatible new format model", () => { + const config: ProvidersConfigMap = { + "openai-compatible/together-ai": { + apiKeySet: true, isEnabled: true, isConfigured: true, - providers: [ + baseUrl: "https://api.together.xyz", + models: [ { - id: "other-provider", - name: "Other Provider", - baseUrl: "https://other.example.com", - apiKeySet: false, - isEnabled: true, - isConfigured: true, - models: ["some-model"], + id: "llama-3-1-70b", + contextWindowTokens: 131072, }, ], }, - } as unknown as ProvidersConfigMap; + }; expect( - getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", config) - ).toBe(null); + getModelContextWindowOverride("openai-compatible/together-ai:llama-3-1-70b", config) + ).toBe(131072); }); - test("returns context window for openai-compatible model", () => { - const config = { - "openai-compatible": { + // Old format tests (dual-support) + test("returns context window for openai-compatible old format model", () => { + const config: ProvidersConfigMap = { + "openai-compatible/together-ai": { + apiKeySet: true, isEnabled: true, isConfigured: true, - providers: [ + baseUrl: "https://api.together.xyz", + models: [ { - id: "together-ai", - name: "Together AI", - baseUrl: "https://api.together.xyz", - apiKeySet: true, - isEnabled: true, - 
isConfigured: true, - models: [ - { - id: "llama-3-1-70b", - contextWindowTokens: 131072, - }, - ], + id: "llama-3-1-70b", + contextWindowTokens: 131072, }, ], }, - } as unknown as ProvidersConfigMap; + }; expect( getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", config) diff --git a/src/common/utils/providers/modelEntries.ts b/src/common/utils/providers/modelEntries.ts index 03841c57d6..69e2bf70de 100644 --- a/src/common/utils/providers/modelEntries.ts +++ b/src/common/utils/providers/modelEntries.ts @@ -46,12 +46,6 @@ function findProviderModelEntry( return null; } - for (const entry of entries) { - if (getProviderModelEntryId(entry) === modelId) { - return entry; - } - } - return null; } diff --git a/src/node/services/providerModelFactory.ts b/src/node/services/providerModelFactory.ts index 9f013a5610..da80331e16 100644 --- a/src/node/services/providerModelFactory.ts +++ b/src/node/services/providerModelFactory.ts @@ -674,9 +674,10 @@ export class ProviderModelFactory { // Check if provider is supported (prevents silent failures when adding to PROVIDER_REGISTRY // but forgetting to implement handler below) - // Note: "openai-compatible" is a special multi-instance provider handled separately - const isSupportedProvider = - providerName in PROVIDER_REGISTRY || providerName === "openai-compatible"; + // Note: "openai-compatible" and "openai-compatible/*" are special multi-instance providers handled separately + const isOpenAICompatibleProvider = + providerName === "openai-compatible" || providerName.startsWith("openai-compatible/"); + const isSupportedProvider = providerName in PROVIDER_REGISTRY || isOpenAICompatibleProvider; if (!isSupportedProvider) { return Err({ type: "provider_not_supported", @@ -1542,18 +1543,12 @@ export class ProviderModelFactory { } // Handle OpenAI-compatible providers (dynamic provider instances) - // Model string format: "openai-compatible::" - if (providerName === "openai-compatible") { - // Parse instance 
ID from model string - const colonIndex = modelId.indexOf(":"); - if (colonIndex === -1) { - return Err({ - type: "invalid_model_string", - message: `Invalid openai-compatible model string. Expected "openai-compatible::"`, - }); - } - const instanceId = modelId.slice(0, colonIndex); - const actualModelId = modelId.slice(colonIndex + 1); + const isOpenAICompatible = + providerName === "openai-compatible" || providerName.startsWith("openai-compatible/"); + + if (isOpenAICompatible) { + const instanceId = providerName.slice("openai-compatible/".length); + const actualModelId = modelId; // Load the openai-compatible provider config const openaiCompatibleConfig = providersConfig["openai-compatible"] as @@ -1566,7 +1561,7 @@ export class ProviderModelFactory { if (!instance) { return Err({ type: "provider_not_supported", - provider: `openai-compatible:${instanceId}`, + provider: `openai-compatible/${instanceId}`, }); } @@ -1574,7 +1569,7 @@ export class ProviderModelFactory { if (instance.enabled === false) { return Err({ type: "provider_disabled", - provider: `openai-compatible:${instanceId}`, + provider: `openai-compatible/${instanceId}`, }); } @@ -1583,7 +1578,7 @@ export class ProviderModelFactory { if (instance.apiKey && isOpReference(instance.apiKey) && !resolvedApiKey) { return Err({ type: "api_key_not_found", - provider: `openai-compatible:${instanceId}`, + provider: `openai-compatible/${instanceId}`, }); } @@ -1591,7 +1586,7 @@ export class ProviderModelFactory { if (!resolvedApiKey && !instance.baseUrl) { return Err({ type: "api_key_not_found", - provider: `openai-compatible:${instanceId}`, + provider: `openai-compatible/${instanceId}`, }); } diff --git a/src/node/services/providerService.ts b/src/node/services/providerService.ts index 25b2a2c6c1..8ed11fe4dd 100644 --- a/src/node/services/providerService.ts +++ b/src/node/services/providerService.ts @@ -84,6 +84,16 @@ export class ProviderService { this.emitter.emit("configChanged"); } + /** + * Parse an 
openai-compatible provider key like "openai-compatible/fireworks". + * Returns the instance ID if the key matches the pattern, null otherwise. + */ + private parseOpenAICompatibleKey(provider: string): string | null { + if (!provider.startsWith("openai-compatible/")) return null; + const instanceId = provider.slice("openai-compatible/".length); + return instanceId.length > 0 ? instanceId : null; + } + public list(): ProviderName[] { try { const providers = [...SUPPORTED_PROVIDERS]; @@ -265,6 +275,25 @@ export class ProviderService { result[provider] = providerInfo; } + // Flatten openai-compatible providers into individual entries with keys like + // "openai-compatible/fireworks" so they follow the standard provider:modelId pattern. + // This eliminates the need for special handling and type guards throughout the codebase. + const openaiCompatibleInfo = this.getOpenAICompatibleProvidersInfo(); + if (openaiCompatibleInfo.providers && openaiCompatibleInfo.providers.length > 0) { + for (const instance of openaiCompatibleInfo.providers) { + const key = `openai-compatible/${instance.id}`; + result[key] = { + apiKeySet: instance.apiKeySet, + apiKeyIsOpRef: instance.apiKeyIsOpRef, + apiKeyOpLabel: instance.apiKeyOpLabel, + isEnabled: instance.isEnabled, + isConfigured: instance.isConfigured, + baseUrl: instance.baseUrl, + models: instance.models, + }; + } + } + return result; } @@ -272,6 +301,12 @@ export class ProviderService { * Set custom models for a provider */ public setModels(provider: string, models: ProviderModelEntry[]): Result { + // Route openai-compatible/* keys to the dedicated handler + const instanceId = this.parseOpenAICompatibleKey(provider); + if (instanceId) { + return this.setOpenAICompatibleProviderModels(instanceId, models); + } + try { const normalizedModels = normalizeProviderModelEntries(models); @@ -379,6 +414,24 @@ export class ProviderService { keyPath: string[], value: unknown ): Promise> { + // Route openai-compatible/* keys to the dedicated 
handler + const instanceId = this.parseOpenAICompatibleKey(provider); + if (instanceId) { + // Map keyPath to updateOpenAICompatibleProvider format + const updates: Partial> = {}; + if (keyPath.length === 1) { + const key = keyPath[0]; + if (key === "baseUrl") { + updates.baseUrl = value as string; + } else if (key === "apiKey") { + updates.apiKey = value as string; + } else if (key === "enabled") { + updates.enabled = value as boolean; + } + } + return this.updateOpenAICompatibleProvider(instanceId, updates); + } + try { // Load current providers config or create empty const providersConfig = this.config.loadProvidersConfig() ?? {}; @@ -445,6 +498,26 @@ export class ProviderService { keyPath: string[], value: string | boolean ): Promise> { + // Route openai-compatible/* keys to the dedicated handler + const instanceId = this.parseOpenAICompatibleKey(provider); + if (instanceId) { + // Map keyPath to updateOpenAICompatibleProvider format + const updates: Partial> = {}; + if (keyPath.length === 1) { + const key = keyPath[0]; + if (key === "baseUrl") { + updates.baseUrl = value as string; + } else if (key === "apiKey") { + updates.apiKey = value as string; + } else if (key === "name") { + updates.name = value as string; + } else if (key === "enabled") { + updates.enabled = value as boolean; + } + } + return this.updateOpenAICompatibleProvider(instanceId, updates); + } + try { // Load current providers config or create empty const providersConfig = this.config.loadProvidersConfig() ?? 
{}; From 2ceac4bbe42fa9e23b60bdd28cc17119f493e80a Mon Sep 17 00:00:00 2001 From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com> Date: Sun, 8 Mar 2026 11:48:13 -0300 Subject: [PATCH 3/7] fix: remove ' : ' separated parsing from openai-compatible models --- .../schemas/openaiCompatibleProvider.ts | 2 +- .../utils/providers/modelEntries.test.ts | 51 ------------------- src/common/utils/providers/modelEntries.ts | 27 +++++++++- 3 files changed, 27 insertions(+), 53 deletions(-) diff --git a/src/common/config/schemas/openaiCompatibleProvider.ts b/src/common/config/schemas/openaiCompatibleProvider.ts index f8762d2095..5372d76890 100644 --- a/src/common/config/schemas/openaiCompatibleProvider.ts +++ b/src/common/config/schemas/openaiCompatibleProvider.ts @@ -7,7 +7,7 @@ import { ProviderModelEntrySchema } from "./providerModelEntry"; * Examples: Together AI, Fireworks, LM Studio, Jan, custom inference servers. */ export const OpenAICompatibleProviderInstanceSchema = z.object({ - /** Unique identifier for this provider instance (used in model strings like "openai-compatible:my-provider:model-id") */ + /** Unique identifier for this provider instance (used in model strings like "openai-compatible/my-provider:model-id") */ id: z.string().min(1), /** Display name shown in the UI */ name: z.string().min(1), diff --git a/src/common/utils/providers/modelEntries.test.ts b/src/common/utils/providers/modelEntries.test.ts index e1975bb143..69542d6c98 100644 --- a/src/common/utils/providers/modelEntries.test.ts +++ b/src/common/utils/providers/modelEntries.test.ts @@ -88,35 +88,6 @@ describe("resolveModelForMetadata", () => { "anthropic:claude-sonnet-4-6" ); }); - - // Old format tests (dual-support for existing data): openai-compatible:{instanceId}:{modelId} - test("returns original model for openai-compatible old format without config", () => { - expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", null)).toBe( - 
"openai-compatible:together-ai:llama-3-1-70b" - ); - }); - - test("returns mapped model for openai-compatible old format when mapping exists", () => { - const config: ProvidersConfigMap = { - "openai-compatible/together-ai": { - apiKeySet: true, - isEnabled: true, - isConfigured: true, - baseUrl: "https://api.together.xyz", - models: [ - { - id: "llama-3-1-70b", - mappedToModel: "anthropic:claude-sonnet-4-6", - }, - ], - }, - }; - - // Old format should still resolve using flattened config - expect(resolveModelForMetadata("openai-compatible:together-ai:llama-3-1-70b", config)).toBe( - "anthropic:claude-sonnet-4-6" - ); - }); }); describe("getModelContextWindowOverride", () => { @@ -163,28 +134,6 @@ describe("getModelContextWindowOverride", () => { ).toBe(131072); }); - // Old format tests (dual-support) - test("returns context window for openai-compatible old format model", () => { - const config: ProvidersConfigMap = { - "openai-compatible/together-ai": { - apiKeySet: true, - isEnabled: true, - isConfigured: true, - baseUrl: "https://api.together.xyz", - models: [ - { - id: "llama-3-1-70b", - contextWindowTokens: 131072, - }, - ], - }, - }; - - expect( - getModelContextWindowOverride("openai-compatible:together-ai:llama-3-1-70b", config) - ).toBe(131072); - }); - test("returns null for standard provider without configWindowTokens", () => { const config: ProvidersConfigMap = { ollama: { diff --git a/src/common/utils/providers/modelEntries.ts b/src/common/utils/providers/modelEntries.ts index 69e2bf70de..49592a528e 100644 --- a/src/common/utils/providers/modelEntries.ts +++ b/src/common/utils/providers/modelEntries.ts @@ -36,7 +36,32 @@ function parseProviderModelId(fullModelId: string): ParsedProviderModelId | null }; } -function findProviderModelEntry( +/** + * Parse an openai-compatible model ID. + * Format: "openai-compatible/{instanceId}:{modelId}" + * + * Returns { provider, modelId, instanceId } or null if not an openai-compatible model. 
+ */ +function parseOpenAICompatibleModelId( + fullModelId: string +): { provider: string; modelId: string; instanceId: string } | null { + // Format: openai-compatible/{instanceId}:{modelId} + if (fullModelId.startsWith("openai-compatible/")) { + const colonIndex = fullModelId.indexOf(":"); + if (colonIndex === -1 || colonIndex <= "openai-compatible/".length) { + return null; + } + return { + provider: fullModelId.slice(0, colonIndex), + modelId: fullModelId.slice(colonIndex + 1), + instanceId: fullModelId.slice("openai-compatible/".length, colonIndex), + }; + } + + return null; +} + +export function findProviderModelEntry( providersConfig: ProvidersConfigMap | null, provider: string, modelId: string From 39e6514e56e81fd3d017ad1ed18b9719ba2d9488 Mon Sep 17 00:00:00 2001 From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com> Date: Sun, 8 Mar 2026 11:52:34 -0300 Subject: [PATCH 4/7] chore: remove outdated descriptor From bb7b9abf1e09ca691948ccffe7d240e04a7146fb Mon Sep 17 00:00:00 2001 From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com> Date: Sun, 8 Mar 2026 12:12:39 -0300 Subject: [PATCH 5/7] Update ModelsSection.tsx --- .../features/Settings/Sections/ModelsSection.tsx | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/browser/features/Settings/Sections/ModelsSection.tsx b/src/browser/features/Settings/Sections/ModelsSection.tsx index 59ccf08cc9..ca49207999 100644 --- a/src/browser/features/Settings/Sections/ModelsSection.tsx +++ b/src/browser/features/Settings/Sections/ModelsSection.tsx @@ -35,7 +35,7 @@ import { import { ModelRow } from "./ModelRow"; // Providers to exclude from the custom models UI (handled specially or internal) -const HIDDEN_PROVIDERS = new Set(["mux-gateway", "openai-compatible"]); +const HIDDEN_PROVIDERS = new Set(["mux-gateway"]); // Shared header cell styles const headerCellBase = "py-1.5 pr-2 text-xs font-medium text-muted"; @@ -61,8 +61,6 @@ interface EditingState { 
contextWindowTokens: string; mappedToModel: string; focus?: "model" | "context"; - /** Instance ID for openai-compatible providers (e.g., "together-ai") */ - instanceId?: string; } function parseContextWindowTokensInput(value: string): number | null { @@ -219,8 +217,7 @@ export function ModelsSection() { provider: string, modelId: string, contextWindowTokens: number | null, - mappedToModel: string | null, - instanceId?: string + mappedToModel: string | null ) => { setEditing({ provider, @@ -229,7 +226,6 @@ export function ModelsSection() { contextWindowTokens: contextWindowTokens === null ? "" : String(contextWindowTokens), mappedToModel: mappedToModel ?? "", focus: "model", - instanceId, }); setError(null); }, @@ -241,8 +237,7 @@ export function ModelsSection() { provider: string, modelId: string, contextWindowTokens: number | null, - mappedToModel: string | null, - instanceId?: string + mappedToModel: string | null ) => { setEditing({ provider, @@ -251,7 +246,6 @@ export function ModelsSection() { contextWindowTokens: contextWindowTokens === null ? "" : String(contextWindowTokens), mappedToModel: mappedToModel ?? 
"", focus: "context", - instanceId, }); setError(null); }, From 7e6a0209ae5b6c1096acd07b8fad7d515b936e2c Mon Sep 17 00:00:00 2001 From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com> Date: Sun, 8 Mar 2026 12:20:27 -0300 Subject: [PATCH 6/7] chore: remove redundant check --- src/browser/features/Settings/Sections/ModelsSection.tsx | 2 +- src/node/services/providerModelFactory.ts | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/browser/features/Settings/Sections/ModelsSection.tsx b/src/browser/features/Settings/Sections/ModelsSection.tsx index ca49207999..2cc8eafb72 100644 --- a/src/browser/features/Settings/Sections/ModelsSection.tsx +++ b/src/browser/features/Settings/Sections/ModelsSection.tsx @@ -290,7 +290,7 @@ export function ModelsSection() { mappedTo ); - // With flattened structure, openai-compatible/{instanceId} is just a regular provider + // Optimistic update - returns new models array for API call const updatedModels = updateModelsOptimistically(editing.provider, (models) => { const nextModels: ProviderModelEntry[] = []; let replaced = false; diff --git a/src/node/services/providerModelFactory.ts b/src/node/services/providerModelFactory.ts index da80331e16..11e9a05eaa 100644 --- a/src/node/services/providerModelFactory.ts +++ b/src/node/services/providerModelFactory.ts @@ -674,9 +674,8 @@ export class ProviderModelFactory { // Check if provider is supported (prevents silent failures when adding to PROVIDER_REGISTRY // but forgetting to implement handler below) - // Note: "openai-compatible" and "openai-compatible/*" are special multi-instance providers handled separately - const isOpenAICompatibleProvider = - providerName === "openai-compatible" || providerName.startsWith("openai-compatible/"); + // Note: "openai-compatible/*" are special multi-instance providers handled separately + const isOpenAICompatibleProvider = providerName.startsWith("openai-compatible/"); const isSupportedProvider = providerName in 
PROVIDER_REGISTRY || isOpenAICompatibleProvider; if (!isSupportedProvider) { return Err({ @@ -1543,8 +1542,7 @@ } // Handle OpenAI-compatible providers (dynamic provider instances) - const isOpenAICompatible = - providerName === "openai-compatible" || providerName.startsWith("openai-compatible/"); + const isOpenAICompatible = providerName.startsWith("openai-compatible/"); if (isOpenAICompatible) { const instanceId = providerName.slice("openai-compatible/".length); From 18c38e820782fab9e8d4443e4b3192bea157a1de Mon Sep 17 00:00:00 2001 From: Gabriel Lima <39922116+TheGB0077@users.noreply.github.com> Date: Thu, 12 Mar 2026 14:16:00 -0300 Subject: [PATCH 7/7] refactor: rebase and move provider section --- .../features/Settings/Sections/ModelRow.tsx | 9 +++++++++ .../OpenAICompatibleProvidersSection.tsx | 4 ++-- .../Settings/Sections/ProvidersSection.tsx | 20 ++++++++++++++++--- .../Settings/Sections/System1Section.tsx | 1 - src/browser/hooks/useModelsFromSettings.ts | 5 ----- .../utils/providers/openaiCompatible.ts | 13 ++++++++++++ 6 files changed, 41 insertions(+), 11 deletions(-) create mode 100644 src/common/utils/providers/openaiCompatible.ts diff --git a/src/browser/features/Settings/Sections/ModelRow.tsx b/src/browser/features/Settings/Sections/ModelRow.tsx index 14b0d302d5..87d5a3c4ec 100644 --- a/src/browser/features/Settings/Sections/ModelRow.tsx +++ b/src/browser/features/Settings/Sections/ModelRow.tsx @@ -15,6 +15,10 @@ import { formatModelDisplayName } from "@/common/utils/ai/modelDisplay"; import { cn } from "@/common/lib/utils"; import type { AvailableRoute } from "@/common/routing"; import { getModelStats, type ModelStats } from "@/common/utils/tokens/modelStats"; +import { + isOpenAICompatibleProvider, + formatOpenAICompatibleDisplayName, +} from "@/common/utils/providers/openaiCompatible"; /** Format tokens as human-readable string (e.g.
200000 -> "200k") */ function formatTokenCount(tokens: number): string { @@ -329,6 +333,11 @@ export function ModelRow(props: ModelRowProps) { {props.modelId} + {isOpenAICompatibleProvider(props.provider) && ( + + {formatOpenAICompatibleDisplayName(props.provider)} + + )} {mappedModelDisplayName && ( → diff --git a/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx b/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx index 0fa845c4c9..da63f27dba 100644 --- a/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx +++ b/src/browser/features/Settings/Sections/OpenAICompatibleProvidersSection.tsx @@ -262,7 +262,7 @@ export function OpenAICompatibleProvidersSection({

Configure OpenAI-compatible API endpoints (Together AI, Fireworks, LM Studio, etc.). Models are accessed via{" "} - openai-compatible:provider-id:model-name. + openai-compatible/provider-id:model-name.

{error && ( @@ -455,7 +455,7 @@ export function OpenAICompatibleProvidersSection({ placeholder="together-ai" />

- Used in model strings: openai-compatible: + Used in model strings: openai-compatible/ together-ai:model-id

diff --git a/src/browser/features/Settings/Sections/ProvidersSection.tsx b/src/browser/features/Settings/Sections/ProvidersSection.tsx index 21aa40eec7..cdd7deee2b 100644 --- a/src/browser/features/Settings/Sections/ProvidersSection.tsx +++ b/src/browser/features/Settings/Sections/ProvidersSection.tsx @@ -31,7 +31,6 @@ import { CSS } from "@dnd-kit/utilities"; import { createEditKeyHandler } from "@/browser/utils/ui/keybinds"; import { getBrowserBackendBaseUrl } from "@/browser/utils/backendBaseUrl"; import { PROVIDER_DEFINITIONS, type ProviderName } from "@/common/constants/providers"; -import type { ProvidersConfigMap } from "@/common/orpc/types"; import { OpenAICompatibleProvidersSection } from "./OpenAICompatibleProvidersSection"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { getAllowedProvidersForUi } from "@/browser/utils/policyUi"; @@ -1228,10 +1227,27 @@ export function ProvidersSection() { {( [ { key: "direct", label: "Direct Providers", providers: providerGroups.direct }, + { + key: "openai-compatible", + label: "User Providers", + providers: [], + render: () => , + }, { key: "gateway", label: "Gateways", providers: providerGroups.gateway }, { key: "local", label: "Local", providers: providerGroups.local }, ] as const ).map((section) => { + if ("render" in section) { + return ( +
+
+ {section.label} +
+ {section.render()} +
+ ); + } + if (section.providers.length === 0) { return null; } @@ -2203,8 +2219,6 @@ export function ProvidersSection() { ); })} - - {config && !hasAnyConfiguredProvider && (
No providers are currently enabled. You won't be able to send messages until you diff --git a/src/browser/features/Settings/Sections/System1Section.tsx b/src/browser/features/Settings/Sections/System1Section.tsx index 22a0bfa7d3..c6ca010d11 100644 --- a/src/browser/features/Settings/Sections/System1Section.tsx +++ b/src/browser/features/Settings/Sections/System1Section.tsx @@ -14,7 +14,6 @@ import { useAPI } from "@/browser/contexts/API"; import { useOptionalWorkspaceContext } from "@/browser/contexts/WorkspaceContext"; import { getDefaultModel, getSuggestedModels } from "@/browser/hooks/useModelsFromSettings"; import { useProvidersConfig } from "@/browser/hooks/useProvidersConfig"; -import { useOpenAICompatibleProviders } from "@/browser/hooks/useOpenAICompatibleProviders"; import { usePersistedState } from "@/browser/hooks/usePersistedState"; import { getModelKey, diff --git a/src/browser/hooks/useModelsFromSettings.ts b/src/browser/hooks/useModelsFromSettings.ts index 11eb803a9b..cb54f68d31 100644 --- a/src/browser/hooks/useModelsFromSettings.ts +++ b/src/browser/hooks/useModelsFromSettings.ts @@ -8,7 +8,6 @@ import { import { WORKSPACE_DEFAULTS } from "@/constants/workspaceDefaults"; import { useProvidersConfig } from "./useProvidersConfig"; import { useRouting } from "./useRouting"; -import { useOpenAICompatibleProviders } from "./useOpenAICompatibleProviders"; import { usePolicy } from "@/browser/contexts/PolicyContext"; import { useAPI } from "@/browser/contexts/API"; import { isValidProvider } from "@/common/constants/providers"; @@ -20,9 +19,6 @@ import { } from "@/common/utils/ai/models"; import { isModelAvailable } from "@/common/routing"; import type { ProviderModelEntry, ProvidersConfigMap } from "@/common/orpc/types"; -import type { - OpenAICompatibleProvidersInfo, -} from "@/common/orpc/types"; import { DEFAULT_MODEL_KEY, HIDDEN_MODELS_KEY } from "@/common/constants/storage"; import { getProviderModelEntryId } from 
"@/common/utils/providers/modelEntries"; @@ -127,7 +123,6 @@ export function useModelsFromSettings() { ); const { config, refresh } = useProvidersConfig(); const { routePriority, routeOverrides } = useRouting(); - const { config: openaiCompatibleConfig } = useOpenAICompatibleProviders(); const [defaultModel, setDefaultModel] = usePersistedState( DEFAULT_MODEL_KEY, diff --git a/src/common/utils/providers/openaiCompatible.ts b/src/common/utils/providers/openaiCompatible.ts new file mode 100644 index 0000000000..d118a7b844 --- /dev/null +++ b/src/common/utils/providers/openaiCompatible.ts @@ -0,0 +1,13 @@ +const OPENAI_COMPATIBLE_PREFIX = "openai-compatible/"; + +export function isOpenAICompatibleProvider(provider: string): boolean { + return provider.startsWith(OPENAI_COMPATIBLE_PREFIX); +} + +export function formatOpenAICompatibleDisplayName(provider: string): string { + const instanceId = provider.slice(OPENAI_COMPATIBLE_PREFIX.length); + return instanceId + .split("-") + .map((part) => part.charAt(0).toUpperCase() + part.slice(1)) + .join(" "); +}