Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 59 additions & 7 deletions lib/codex-manager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,11 @@ import {
} from "./forecast.js";
import { createLogger } from "./logger.js";
import { MODEL_FAMILIES, type ModelFamily } from "./prompts/codex.js";
import {
getModelCapabilities,
getModelProfile,
resolveNormalizedModel,
} from "./request/helpers/model-map.js";
import {
fetchCodexQuotaSnapshot,
formatQuotaSnapshotLine,
Expand Down Expand Up @@ -95,6 +100,14 @@ type TokenSuccessWithAccount = TokenSuccess & {
type PromptTone = "accent" | "success" | "warning" | "danger" | "muted";
const log = createLogger("codex-manager");

// Snapshot of how a caller-supplied model string maps onto the model the
// backend will actually be probed with, plus the prompt/capability metadata
// derived from the normalized name.
interface ModelInspection {
  // Raw model string exactly as the caller supplied it (e.g. a CLI flag value).
  requested: string;
  // Canonical model name produced by resolveNormalizedModel(requested).
  normalized: string;
  // True when normalization changed the name (requested !== normalized).
  remapped: boolean;
  // Prompt family used to select the Codex system prompt for this model.
  promptFamily: ModelFamily;
  // Capability flags reported by getModelCapabilities() for the normalized model.
  capabilities: ReturnType<typeof getModelCapabilities>;
}

function stylePromptText(text: string, tone: PromptTone): string {
if (!output.isTTY) return text;
const ui = getUiRuntimeOptions();
Expand All @@ -117,6 +130,30 @@ function stylePromptText(text: string, tone: PromptTone): string {
return `${legacyCode}${text}${ANSI.reset}`;
}

/**
 * Resolve a caller-supplied model string into a {@link ModelInspection}:
 * normalize the name, look up its prompt family, and collect its
 * capability flags in one place so callers probe with a consistent model.
 */
function inspectRequestedModel(requestedModel: string): ModelInspection {
  const effectiveModel = resolveNormalizedModel(requestedModel);
  const { promptFamily } = getModelProfile(effectiveModel);
  const capabilities = getModelCapabilities(effectiveModel);
  return {
    requested: requestedModel,
    normalized: effectiveModel,
    // A remap happened whenever normalization produced a different name.
    remapped: effectiveModel !== requestedModel,
    promptFamily,
    capabilities,
  };
}

/**
 * Render a one-line, pipe-separated summary of a model inspection for
 * console output, e.g. `a -> b | prompt family x | tool search yes | computer use no`.
 */
function formatModelInspection(model: ModelInspection): string {
  // Show "requested -> normalized" only when a remap actually occurred.
  let route = model.normalized;
  if (model.remapped) {
    route = `${model.requested} -> ${model.normalized}`;
  }
  const yesNo = (flag: boolean): string => (flag ? "yes" : "no");
  const segments = [
    route,
    `prompt family ${model.promptFamily}`,
    `tool search ${yesNo(model.capabilities.toolSearch)}`,
    `computer use ${yesNo(model.capabilities.computerUse)}`,
  ];
  return segments.join(" | ");
}

/**
 * Collapse every run of whitespace to a single space and strip
 * leading/trailing whitespace, yielding a compact one-line string.
 */
function collapseWhitespace(value: string): string {
  return value
    .split(/\s+/)
    .filter((segment) => segment.length > 0)
    .join(" ");
}
Expand Down Expand Up @@ -1898,6 +1935,7 @@ async function runHealthCheck(options: HealthCheckOptions = {}): Promise<void> {
const forceRefresh = options.forceRefresh === true;
const liveProbe = options.liveProbe === true;
const probeModel = options.model?.trim() || "gpt-5-codex";
const modelInspection = inspectRequestedModel(probeModel);
const display = options.display ?? DEFAULT_DASHBOARD_DISPLAY_SETTINGS;
const quotaCache = liveProbe ? await loadQuotaCache() : null;
const workingQuotaCache = quotaCache ? cloneQuotaCacheData(quotaCache) : null;
Expand Down Expand Up @@ -1926,6 +1964,7 @@ async function runHealthCheck(options: HealthCheckOptions = {}): Promise<void> {
: `Checking ${storage.accounts.length} account(s) with quick check${liveProbe ? " + live check" : ""}...`,
"accent",
));
console.log(stylePromptText(`Model probe: ${formatModelInspection(modelInspection)}`, "muted"));
for (let i = 0; i < storage.accounts.length; i += 1) {
const account = storage.accounts[i];
if (!account) continue;
Expand Down Expand Up @@ -1954,7 +1993,7 @@ async function runHealthCheck(options: HealthCheckOptions = {}): Promise<void> {
const snapshot = await fetchCodexQuotaSnapshot({
accountId: probeAccountId,
accessToken: currentAccessToken,
model: probeModel,
model: modelInspection.normalized,
});
if (workingQuotaCache) {
quotaCacheChanged =
Expand Down Expand Up @@ -2045,7 +2084,7 @@ async function runHealthCheck(options: HealthCheckOptions = {}): Promise<void> {
const snapshot = await fetchCodexQuotaSnapshot({
accountId: probeAccountId,
accessToken: result.access,
model: probeModel,
model: modelInspection.normalized,
});
if (workingQuotaCache) {
quotaCacheChanged =
Expand Down Expand Up @@ -2570,6 +2609,7 @@ async function runForecast(args: string[]): Promise<number> {
}
const options = parsedArgs.options;
const display = DEFAULT_DASHBOARD_DISPLAY_SETTINGS;
const probeModel = inspectRequestedModel(options.model?.trim() || "gpt-5-codex").normalized;
const quotaCache = options.live ? await loadQuotaCache() : null;
const workingQuotaCache = quotaCache ? cloneQuotaCacheData(quotaCache) : null;
let quotaCacheChanged = false;
Expand Down Expand Up @@ -2620,7 +2660,7 @@ async function runForecast(args: string[]): Promise<number> {
const liveQuota = await fetchCodexQuotaSnapshot({
accountId: probeAccountId,
accessToken: probeAccessToken,
model: options.model,
model: probeModel,
});
liveQuotaByIndex.set(i, liveQuota);
if (workingQuotaCache) {
Expand Down Expand Up @@ -2767,6 +2807,8 @@ async function runReport(args: string[]): Promise<number> {
return 1;
}
const options = parsedArgs.options;
const requestedModel = options.model?.trim() || "gpt-5-codex";
const modelInspection = inspectRequestedModel(requestedModel);

setStoragePath(null);
const storagePath = getStoragePath();
Expand Down Expand Up @@ -2802,7 +2844,7 @@ async function runReport(args: string[]): Promise<number> {
const liveQuota = await fetchCodexQuotaSnapshot({
accountId,
accessToken: refreshResult.access,
model: options.model,
model: modelInspection.normalized,
});
liveQuotaByIndex.set(i, liveQuota);
} catch (error) {
Expand Down Expand Up @@ -2848,6 +2890,13 @@ async function runReport(args: string[]): Promise<number> {
generatedAt: new Date(now).toISOString(),
storagePath,
model: options.model,
modelSelection: {
requested: modelInspection.requested,
normalized: modelInspection.normalized,
remapped: modelInspection.remapped,
promptFamily: modelInspection.promptFamily,
capabilities: modelInspection.capabilities,
},
liveProbe: options.live,
accounts: {
total: accountCount,
Expand Down Expand Up @@ -2878,6 +2927,7 @@ async function runReport(args: string[]): Promise<number> {

console.log(`Report generated at ${report.generatedAt}`);
console.log(`Storage: ${report.storagePath}`);
console.log(`Model: ${formatModelInspection(modelInspection)}`);
console.log(
`Accounts: ${report.accounts.total} total (${report.accounts.enabled} enabled, ${report.accounts.disabled} disabled, ${report.accounts.coolingDown} cooling, ${report.accounts.rateLimited} rate-limited)`,
);
Expand Down Expand Up @@ -3361,6 +3411,7 @@ async function runFix(args: string[]): Promise<number> {
}
const options = parsedArgs.options;
const display = DEFAULT_DASHBOARD_DISPLAY_SETTINGS;
const probeModel = inspectRequestedModel(options.model?.trim() || "gpt-5-codex").normalized;
const quotaCache = options.live ? await loadQuotaCache() : null;
const workingQuotaCache = quotaCache ? cloneQuotaCacheData(quotaCache) : null;
let quotaCacheChanged = false;
Expand Down Expand Up @@ -3409,7 +3460,7 @@ async function runFix(args: string[]): Promise<number> {
const snapshot = await fetchCodexQuotaSnapshot({
accountId: probeAccountId,
accessToken: currentAccessToken,
model: options.model,
model: probeModel,
});
if (workingQuotaCache) {
quotaCacheChanged =
Expand Down Expand Up @@ -3505,7 +3556,7 @@ async function runFix(args: string[]): Promise<number> {
const snapshot = await fetchCodexQuotaSnapshot({
accountId: probeAccountId,
accessToken: refreshResult.access,
model: options.model,
model: probeModel,
});
if (workingQuotaCache) {
quotaCacheChanged =
Expand Down Expand Up @@ -4836,6 +4887,7 @@ async function runBest(args: string[]): Promise<number> {
}

const now = Date.now();
const probeModel = inspectRequestedModel(options.model?.trim() || "gpt-5-codex").normalized;
const refreshFailures = new Map<number, TokenFailure>();
const liveQuotaByIndex = new Map<number, Awaited<ReturnType<typeof fetchCodexQuotaSnapshot>>>();
const probeIdTokenByIndex = new Map<number, string>();
Expand Down Expand Up @@ -4918,7 +4970,7 @@ async function runBest(args: string[]): Promise<number> {
const liveQuota = await fetchCodexQuotaSnapshot({
accountId: probeAccountId,
accessToken: probeAccessToken,
model: options.model,
model: probeModel,
});
liveQuotaByIndex.set(i, liveQuota);
} catch (error) {
Expand Down
53 changes: 17 additions & 36 deletions lib/prompts/codex.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { fileURLToPath } from "node:url";
import type { CacheMetadata, GitHubRelease } from "../types.js";
import { logWarn, logError, logDebug } from "../logger.js";
import { getCodexCacheDir } from "../runtime-paths.js";
import { getModelProfile, type PromptModelFamily } from "../request/helpers/model-map.js";

const GITHUB_API_RELEASES =
"https://api.github.com/repos/openai/codex/releases/latest";
Expand Down Expand Up @@ -44,12 +45,7 @@ function setCacheEntry(key: string, value: { content: string; timestamp: number
* Model family type for prompt selection
* Maps to different system prompts in the Codex CLI
*/
export type ModelFamily =
| "gpt-5-codex"
| "codex-max"
| "codex"
| "gpt-5.2"
| "gpt-5.1";
export type ModelFamily = PromptModelFamily;

/**
* All supported model families
Expand Down Expand Up @@ -87,38 +83,16 @@ const CACHE_FILES: Record<ModelFamily, string> = {
};

/**
* Determine the model family based on the normalized model name
* @param normalizedModel - The normalized model name (e.g., "gpt-5-codex", "gpt-5.1-codex-max", "gpt-5.2", "gpt-5.1")
* Determine the prompt family based on the effective model name.
*
* GPT-5.4-era general-purpose models intentionally stay on the GPT-5.2 prompt
* family until upstream Codex releases a newer general prompt file.
*
* @param normalizedModel - The normalized model name (e.g., "gpt-5-codex", "gpt-5.4", "gpt-5-mini")
* @returns The model family for prompt selection
*/
export function getModelFamily(normalizedModel: string): ModelFamily {
if (normalizedModel.includes("codex-max")) {
return "codex-max";
}
if (
normalizedModel.includes("gpt-5-codex") ||
normalizedModel.includes("gpt 5 codex") ||
normalizedModel.includes("gpt-5.3-codex-spark") ||
normalizedModel.includes("gpt 5.3 codex spark") ||
normalizedModel.includes("gpt-5.3-codex") ||
normalizedModel.includes("gpt 5.3 codex") ||
normalizedModel.includes("gpt-5.2-codex") ||
normalizedModel.includes("gpt 5.2 codex") ||
normalizedModel.includes("gpt-5.1-codex") ||
normalizedModel.includes("gpt 5.1 codex")
) {
return "gpt-5-codex";
}
if (
normalizedModel.includes("codex") ||
normalizedModel.startsWith("codex-")
) {
return "codex";
}
if (normalizedModel.includes("gpt-5.2")) {
return "gpt-5.2";
}
return "gpt-5.1";
return getModelProfile(normalizedModel).promptFamily;
}

async function readFileOrNull(path: string): Promise<string | null> {
Expand Down Expand Up @@ -396,8 +370,15 @@ function refreshInstructionsInBackground(
* Prewarm instruction caches for the provided models/families.
*/
export function prewarmCodexInstructions(models: string[] = []): void {
const candidates = models.length > 0 ? models : ["gpt-5-codex", "gpt-5.2", "gpt-5.1"];
const candidates = models.length > 0 ? models : ["gpt-5-codex", "gpt-5.4", "gpt-5.1"];
const prewarmTargets = new Map<string, string>();
for (const model of candidates) {
const promptFamily = getModelFamily(model);
if (!prewarmTargets.has(promptFamily)) {
prewarmTargets.set(promptFamily, model);
}
}
for (const model of prewarmTargets.values()) {
void getCodexInstructions(model).catch((error) => {
logDebug("Codex instruction prewarm failed", {
model,
Expand Down
Loading