feat(openai-codex): mirror codex CLI's models_cache.json into SF catalog
The static catalog in models.generated.ts carries phantom slugs like
gpt-5-codex / gpt-5.1-codex / gpt-5.1-codex-max / gpt-5.2-codex that the
ChatGPT-account API rejects with HTTP 400 ("model is not supported when
using Codex with a ChatGPT account"). Verified live on this machine:
ERROR: "The 'gpt-5-codex' model is not supported when using Codex with
a ChatGPT account."
Meanwhile the actually-supported slugs for a ChatGPT subscription
(gpt-5.5 default, gpt-5.4, gpt-5.4-mini, gpt-5.3-codex, gpt-5.2) are
not in SF's view at all — so the router scores phantoms, picks one,
dispatch fails, no successful routes record, and routing silently drifts.
The codex CLI itself maintains ~/.codex/models_cache.json with the
authoritative "what THIS account can actually serve" list (visibility +
supported_in_api flags). SF reads that file directly — no duplicate
discovery, no separate API call, single source of truth.
Changes:
- src/resources/extensions/sf/openai-codex-catalog.js (new) — pure file
reader. Resolves CODEX_HOME (or ~/.codex), parses models_cache.json,
filters by visibility==="list" AND supported_in_api===true, mirrors the
result into .sf/runtime/model-catalog/openai-codex.json. Same cache
shape as the generic model-catalog-cache and gemini-catalog modules
so getKnownModelIds picks it up transparently.
- bootstrap/register-hooks.js — wire scheduleOpenaiCodexCatalogRefresh
into session_start, parallel to the existing gemini and generic
catalog refreshes.
- Tests (9): cache-missing, malformed, filter correctness against the
real shape, no-pass-through, slug validation, refresh-writes-cache,
cache-fresh-skips-refresh, and live discovery via the smoke probe
returns exactly ["gpt-5.5", "gpt-5.4", "gpt-5.4-mini", "gpt-5.3-codex",
"gpt-5.2"] on this machine.
Asymmetry vs gemini-cli is appropriate: codex CLI caches locally so SF
just reads the file; gemini-cli does not, so SF's gemini path calls
setupUser + retrieveUserQuota over the wire. Each provider gets the
cheapest reliable discovery path.
Follow-up filed separately: extract codex transport
(codex-app-server-client.ts, openai-codex-responses.ts, this catalog
reader) into a dedicated @singularity-forge/openai-codex-provider
package mirroring the gemini-cli-provider structure for symmetry.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
383e495085
commit
09bc50f0f6
3 changed files with 352 additions and 0 deletions
|
|
@ -517,6 +517,21 @@ export function registerHooks(pi, ecosystemHandlers = []) {
|
|||
} catch {
|
||||
/* non-fatal — gemini catalog refresh must never block session start */
|
||||
}
|
||||
// Refresh the openai-codex model catalog by mirroring the codex CLI's
|
||||
// own ~/.codex/models_cache.json into .sf/runtime/model-catalog/
|
||||
// openai-codex.json. Without this, the static catalog in
|
||||
// models.generated.ts carries phantom slugs (e.g. gpt-5-codex) that
|
||||
// the ChatGPT-account API rejects with 400 ("model is not supported
|
||||
// when using Codex with a ChatGPT account"), and SF's router has no
|
||||
// way to know which slugs the account can actually serve.
|
||||
try {
|
||||
const { scheduleOpenaiCodexCatalogRefresh } = await import(
|
||||
"../openai-codex-catalog.js"
|
||||
);
|
||||
scheduleOpenaiCodexCatalogRefresh(process.cwd());
|
||||
} catch {
|
||||
/* non-fatal — codex catalog refresh must never block session start */
|
||||
}
|
||||
// Detect drift in source-of-truth markdown files since last session.
|
||||
try {
|
||||
const { detectMdFileDrift, formatDriftReport } = await import(
|
||||
|
|
|
|||
137
src/resources/extensions/sf/openai-codex-catalog.js
Normal file
137
src/resources/extensions/sf/openai-codex-catalog.js
Normal file
|
|
@ -0,0 +1,137 @@
|
|||
/**
|
||||
* openai-codex-catalog.js — read codex CLI's own models_cache.json.
|
||||
*
|
||||
* Why a file read and not an API call: the codex CLI maintains
|
||||
* ~/.codex/models_cache.json itself (with fetched_at / etag for refresh
|
||||
* tracking) as part of its normal operation. That cache is the authoritative
|
||||
* "what models can THIS ChatGPT account actually serve" record — distinct
|
||||
* from the static catalog in models.generated.ts which can carry phantom
|
||||
* entries like `gpt-5-codex` that 400 with "model is not supported when
|
||||
* using Codex with a ChatGPT account."
|
||||
*
|
||||
* SF mirrors the visible+supported subset into
|
||||
* .sf/runtime/model-catalog/openai-codex.json so getKnownModelIds and the
|
||||
* model picker pick it up transparently — same shape as the generic
|
||||
* model-catalog-cache and gemini-catalog modules.
|
||||
*
|
||||
* Asymmetry vs gemini-cli (which calls setupUser + retrieveUserQuota over
|
||||
* the wire): codex CLI caches locally; gemini-cli does not. Each provider
|
||||
* gets the cheapest reliable discovery path.
|
||||
*/
|
||||
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { homedir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { sfRuntimeRoot } from "./paths.js";
|
||||
|
||||
// Provider identifier; also the SF cache filename stem (openai-codex.json).
const PROVIDER_ID = "openai-codex";
// Freshness window for the mirrored SF cache: 6 hours in milliseconds.
const CATALOG_TTL_MS = 6 * 60 * 60 * 1000;
|
||||
|
||||
/**
 * Resolve the codex CLI's home directory.
 *
 * Honors the CODEX_HOME environment variable (the same override the codex
 * CLI respects) and falls back to ~/.codex when it is unset. An empty or
 * whitespace-only CODEX_HOME is treated as unset — plain `??` would accept
 * "" and produce a bogus relative cache path in the current directory.
 *
 * @returns {string} Path of the codex home directory.
 */
function codexHome() {
  const override = process.env.CODEX_HOME;
  if (typeof override === "string" && override.trim().length > 0) {
    return override;
  }
  return join(homedir(), ".codex");
}

/**
 * Path of the codex CLI's own model cache file.
 *
 * @returns {string} `<codexHome>/models_cache.json`.
 */
function codexCachePath() {
  return join(codexHome(), "models_cache.json");
}
|
||||
|
||||
/**
 * Location of SF's mirrored catalog for this provider:
 * `<sf-runtime-root>/model-catalog/openai-codex.json`.
 *
 * @param {string} basePath - Project root passed through to sfRuntimeRoot.
 * @returns {string} The mirrored catalog file path.
 */
function sfCacheFilePath(basePath) {
  const catalogDir = join(sfRuntimeRoot(basePath), "model-catalog");
  return join(catalogDir, `${PROVIDER_ID}.json`);
}
|
||||
|
||||
/**
 * Whether SF's mirrored cache exists, parses, has the expected
 * { fetchedAt, modelIds } shape, and was written within CATALOG_TTL_MS.
 * Any read or parse problem counts as stale (returns false).
 *
 * @param {string} basePath - Project root used to locate the cache file.
 * @returns {boolean} True only for a well-formed, recent cache.
 */
function isSfCacheFresh(basePath) {
  try {
    const cachePath = sfCacheFilePath(basePath);
    if (!existsSync(cachePath)) return false;
    const parsed = JSON.parse(readFileSync(cachePath, "utf-8"));
    const hasExpectedShape =
      Boolean(parsed?.fetchedAt) && Array.isArray(parsed?.modelIds);
    if (!hasExpectedShape) return false;
    // An unparseable fetchedAt yields NaN age, which fails this comparison.
    const ageMs = Date.now() - new Date(parsed.fetchedAt).getTime();
    return ageMs <= CATALOG_TTL_MS;
  } catch {
    return false;
  }
}
|
||||
|
||||
/**
 * Persist the discovered model IDs to SF's mirrored catalog file, creating
 * the model-catalog directory if needed. Best-effort: every filesystem
 * error is swallowed so the caller never fails on a cache write.
 *
 * @param {string} basePath - Project root used to locate the cache file.
 * @param {string[]} modelIds - Slugs to record alongside a fresh timestamp.
 */
function writeSfCache(basePath, modelIds) {
  try {
    const catalogDir = join(sfRuntimeRoot(basePath), "model-catalog");
    mkdirSync(catalogDir, { recursive: true });
    const payload = {
      fetchedAt: new Date().toISOString(),
      modelIds,
    };
    writeFileSync(sfCacheFilePath(basePath), JSON.stringify(payload), "utf-8");
  } catch {
    // Best-effort — never fail the caller.
  }
}
|
||||
|
||||
/**
 * Read the codex CLI's local cache and return the slugs the user's account
 * can actually serve. Returns null when the cache is missing, malformed, or
 * the codex CLI has not been initialized.
 *
 * Filters: visibility === "list" AND supported_in_api === true.
 *
 * Why both filters: visibility=hide marks codex-internal models like
 * codex-auto-review (not user-facing), while supported_in_api=false catches
 * preview slugs that exist in the cache but reject API requests for the
 * current account tier (observed live with gpt-5.3-codex-spark).
 *
 * @returns {string[]|null} Usable model slugs, or null when none qualify.
 */
export function readCodexAvailableModels() {
  try {
    const cacheFile = codexCachePath();
    if (!existsSync(cacheFile)) return null;
    const parsed = JSON.parse(readFileSync(cacheFile, "utf-8"));
    if (!Array.isArray(parsed?.models)) return null;
    const slugs = [];
    for (const entry of parsed.models) {
      if (!entry || typeof entry !== "object") continue;
      if (entry.visibility !== "list") continue;
      if (entry.supported_in_api !== true) continue;
      if (typeof entry.slug !== "string" || entry.slug.length === 0) continue;
      slugs.push(entry.slug);
    }
    // An empty result is reported as null, matching the missing-cache case.
    return slugs.length > 0 ? slugs : null;
  } catch {
    return null;
  }
}
|
||||
|
||||
/**
 * Discover the codex model list and write it to the SF model-catalog cache.
 *
 * @param {string} basePath - Project root used to locate the SF cache.
 * @returns {string[]|null} The cached IDs on success, null on any failure.
 *
 * Consumer: scheduleOpenaiCodexCatalogRefresh during session_start.
 */
export function refreshOpenaiCodexCatalog(basePath) {
  const discovered = readCodexAvailableModels();
  if (discovered === null || discovered.length === 0) return null;
  writeSfCache(basePath, discovered);
  return discovered;
}
|
||||
|
||||
/**
 * Fire-and-forget refresh, skipped while the SF cache is still fresh.
 * Deliberately synchronous — reading a local file is cheap, so no
 * setImmediate deferral is needed.
 *
 * @param {string} basePath - Project root used to locate the SF cache.
 *
 * Consumer: bootstrap/register-hooks.js session_start hook.
 */
export function scheduleOpenaiCodexCatalogRefresh(basePath) {
  if (isSfCacheFresh(basePath)) {
    return;
  }
  try {
    refreshOpenaiCodexCatalog(basePath);
  } catch {
    // Per-provider failure is silently swallowed.
  }
}
|
||||
200
src/resources/extensions/sf/tests/openai-codex-catalog.test.mjs
Normal file
200
src/resources/extensions/sf/tests/openai-codex-catalog.test.mjs
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
/**
|
||||
* openai-codex-catalog.test.mjs — verify SF mirrors the codex CLI's own
|
||||
* ~/.codex/models_cache.json into the SF model-catalog cache, filtering by
|
||||
* visibility=list AND supported_in_api=true so phantom or hidden slugs do
|
||||
* not pollute SF's router decisions.
|
||||
*/
|
||||
import {
|
||||
mkdirSync,
|
||||
mkdtempSync,
|
||||
readFileSync,
|
||||
rmSync,
|
||||
writeFileSync,
|
||||
} from "node:fs";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, test } from "vitest";
|
||||
import {
|
||||
readCodexAvailableModels,
|
||||
refreshOpenaiCodexCatalog,
|
||||
scheduleOpenaiCodexCatalogRefresh,
|
||||
} from "../openai-codex-catalog.js";
|
||||
|
||||
// Temp directories created by a test; removed in afterEach.
const tmpDirs = [];
// Captured once at module load so afterEach can restore the env exactly.
const ORIGINAL_CODEX_HOME = process.env.CODEX_HOME;
|
||||
|
||||
// Every test starts with CODEX_HOME unset so makeCodexHome controls it.
beforeEach(() => {
  delete process.env.CODEX_HOME;
});

// Restore the real CODEX_HOME and delete every temp dir a test created.
afterEach(() => {
  if (ORIGINAL_CODEX_HOME === undefined) {
    delete process.env.CODEX_HOME;
  } else {
    process.env.CODEX_HOME = ORIGINAL_CODEX_HOME;
  }
  // splice(0) drains the list; reverse keeps the original pop() order.
  for (const dir of tmpDirs.splice(0).reverse()) {
    if (dir) rmSync(dir, { recursive: true, force: true });
  }
});
|
||||
|
||||
/** Create a throwaway project root containing an empty .sf directory. */
function makeProject() {
  const projectDir = mkdtempSync(join(tmpdir(), "sf-codex-catalog-"));
  tmpDirs.push(projectDir);
  mkdirSync(join(projectDir, ".sf"), { recursive: true });
  return projectDir;
}
|
||||
|
||||
/**
 * Create a throwaway CODEX_HOME — optionally seeded with a models_cache.json
 * payload (pass null to simulate a machine where codex never ran) — and
 * point process.env.CODEX_HOME at it.
 */
function makeCodexHome(cache) {
  const homeDir = mkdtempSync(join(tmpdir(), "codex-home-"));
  tmpDirs.push(homeDir);
  if (cache !== null) {
    const cacheFile = join(homeDir, "models_cache.json");
    writeFileSync(cacheFile, JSON.stringify(cache));
  }
  process.env.CODEX_HOME = homeDir;
  return homeDir;
}
|
||||
|
||||
// Mirrors the real on-disk shape of ~/.codex/models_cache.json (fetched_at /
// etag / client_version plus a models array). Two entries are deliberately
// filter-excluded: codex-auto-review (visibility "hide") and
// gpt-5.3-codex-spark (supported_in_api false).
const REAL_SHAPE_CACHE = {
  fetched_at: "2026-05-14T00:00:00Z",
  etag: "abc",
  client_version: "0.128.0",
  models: [
    { slug: "gpt-5.5", visibility: "list", supported_in_api: true },
    { slug: "gpt-5.4", visibility: "list", supported_in_api: true },
    { slug: "gpt-5.4-mini", visibility: "list", supported_in_api: true },
    { slug: "gpt-5.3-codex", visibility: "list", supported_in_api: true },
    { slug: "gpt-5.3-codex-spark", visibility: "list", supported_in_api: false },
    { slug: "gpt-5.2", visibility: "list", supported_in_api: true },
    { slug: "codex-auto-review", visibility: "hide", supported_in_api: true },
  ],
};
|
||||
|
||||
describe("readCodexAvailableModels", () => {
  test("returns null when ~/.codex/models_cache.json is missing", () => {
    // Fresh CODEX_HOME with no cache file — codex CLI never initialized.
    makeCodexHome(null);
    expect(readCodexAvailableModels()).toBe(null);
  });

  test("returns null on malformed cache", () => {
    // Write invalid JSON by hand; makeCodexHome always serializes valid JSON.
    const dir = mkdtempSync(join(tmpdir(), "codex-bad-"));
    tmpDirs.push(dir);
    writeFileSync(join(dir, "models_cache.json"), "{not json");
    process.env.CODEX_HOME = dir;
    expect(readCodexAvailableModels()).toBe(null);
  });

  test("filters by visibility=list AND supported_in_api=true", () => {
    makeCodexHome(REAL_SHAPE_CACHE);
    const ids = readCodexAvailableModels();
    // Slug order from the cache file is preserved.
    expect(ids).toEqual([
      "gpt-5.5",
      "gpt-5.4",
      "gpt-5.4-mini",
      "gpt-5.3-codex",
      "gpt-5.2",
    ]);
    // codex-auto-review is hidden; gpt-5.3-codex-spark is not supported_in_api → excluded
    expect(ids).not.toContain("codex-auto-review");
    expect(ids).not.toContain("gpt-5.3-codex-spark");
  });

  test("returns null when no models pass the filter", () => {
    // An all-filtered list is reported as null, not as an empty array.
    makeCodexHome({
      fetched_at: "2026-05-14T00:00:00Z",
      models: [
        { slug: "internal-only", visibility: "hide", supported_in_api: true },
        { slug: "preview", visibility: "list", supported_in_api: false },
      ],
    });
    expect(readCodexAvailableModels()).toBe(null);
  });

  test("ignores entries with missing/invalid slug", () => {
    // Absent and empty-string slugs are both dropped by the reader.
    makeCodexHome({
      models: [
        { visibility: "list", supported_in_api: true },
        { slug: "", visibility: "list", supported_in_api: true },
        { slug: "gpt-5.5", visibility: "list", supported_in_api: true },
      ],
    });
    expect(readCodexAvailableModels()).toEqual(["gpt-5.5"]);
  });
});
|
||||
|
||||
describe("refreshOpenaiCodexCatalog", () => {
  test("writes the SF cache from the codex CLI cache", () => {
    const project = makeProject();
    makeCodexHome(REAL_SHAPE_CACHE);

    // The return value is the filtered slug list in cache-file order.
    const result = refreshOpenaiCodexCatalog(project);
    expect(result).toEqual([
      "gpt-5.5",
      "gpt-5.4",
      "gpt-5.4-mini",
      "gpt-5.3-codex",
      "gpt-5.2",
    ]);

    const cachePath = join(
      project,
      ".sf",
      "model-catalog",
      "openai-codex.json",
    );
    const cache = JSON.parse(readFileSync(cachePath, "utf-8"));
    expect(cache.modelIds).toEqual(result);
    // fetchedAt must be a parseable timestamp for the TTL freshness check.
    expect(typeof cache.fetchedAt).toBe("string");
    expect(new Date(cache.fetchedAt).toString()).not.toBe("Invalid Date");
  });

  test("returns null and writes nothing when codex cache is missing", () => {
    const project = makeProject();
    makeCodexHome(null);
    expect(refreshOpenaiCodexCatalog(project)).toBe(null);
  });
});
|
||||
|
||||
describe("scheduleOpenaiCodexCatalogRefresh", () => {
  test("populates SF cache on first call", () => {
    const project = makeProject();
    makeCodexHome(REAL_SHAPE_CACHE);

    scheduleOpenaiCodexCatalogRefresh(project);

    const cachePath = join(
      project,
      ".sf",
      "model-catalog",
      "openai-codex.json",
    );
    const cache = JSON.parse(readFileSync(cachePath, "utf-8"));
    expect(cache.modelIds).toContain("gpt-5.5");
  });

  test("skips refresh when SF cache is fresh", () => {
    const project = makeProject();
    makeCodexHome(REAL_SHAPE_CACHE);
    // Pre-seed a fresh cache with a deliberately different model list
    // to confirm the next call does NOT overwrite it.
    mkdirSync(join(project, ".sf", "model-catalog"), { recursive: true });
    writeFileSync(
      join(project, ".sf", "model-catalog", "openai-codex.json"),
      JSON.stringify({
        fetchedAt: new Date().toISOString(),
        modelIds: ["sentinel-only"],
      }),
    );

    scheduleOpenaiCodexCatalogRefresh(project);

    // The sentinel surviving proves the TTL short-circuit fired.
    const cache = JSON.parse(
      readFileSync(
        join(project, ".sf", "model-catalog", "openai-codex.json"),
        "utf-8",
      ),
    );
    expect(cache.modelIds).toEqual(["sentinel-only"]);
  });
});
|
||||
Loading…
Add table
Reference in a new issue