Skip to content

Commit 5cb8f81

Browse files
committed
fix(models): keep --all aligned with synthetic catalog rows
1 parent 0b452a5 commit 5cb8f81

File tree

2 files changed

+174
-0
lines changed

2 files changed

+174
-0
lines changed

src/commands/models/list.list-command.forward-compat.test.ts

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@ const mocks = vi.hoisted(() => {
3838
loadModelRegistry: vi
3939
.fn()
4040
.mockResolvedValue({ models: [], availableKeys: new Set(), registry: {} }),
41+
loadModelCatalog: vi.fn().mockResolvedValue([]),
4142
resolveConfiguredEntries: vi.fn().mockReturnValue({
4243
entries: [
4344
{
@@ -77,6 +78,10 @@ vi.mock("../../agents/auth-profiles.js", async (importOriginal) => {
7778
};
7879
});
7980

81+
vi.mock("../../agents/model-catalog.js", () => ({
82+
loadModelCatalog: mocks.loadModelCatalog,
83+
}));
84+
8085
vi.mock("./list.registry.js", async (importOriginal) => {
8186
const actual = await importOriginal<typeof import("./list.registry.js")>();
8287
return {
@@ -198,6 +203,133 @@ describe("modelsListCommand forward-compat", () => {
198203
);
199204
});
200205

206+
it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => {
207+
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
208+
mocks.loadModelRegistry.mockResolvedValueOnce({
209+
models: [
210+
{
211+
provider: "openai-codex",
212+
id: "gpt-5.3-codex",
213+
name: "GPT-5.3 Codex",
214+
api: "openai-codex-responses",
215+
baseUrl: "https://chatgpt.com/backend-api",
216+
input: ["text"],
217+
contextWindow: 272000,
218+
maxTokens: 128000,
219+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
220+
},
221+
],
222+
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
223+
registry: {},
224+
});
225+
mocks.loadModelCatalog.mockResolvedValueOnce([
226+
{
227+
provider: "openai-codex",
228+
id: "gpt-5.3-codex",
229+
name: "GPT-5.3 Codex",
230+
input: ["text"],
231+
contextWindow: 272000,
232+
},
233+
{
234+
provider: "openai-codex",
235+
id: "gpt-5.4",
236+
name: "GPT-5.4",
237+
input: ["text"],
238+
contextWindow: 272000,
239+
},
240+
]);
241+
mocks.listProfilesForProvider.mockImplementationOnce((_: unknown, provider: string) =>
242+
provider === "openai-codex" ? ([{ id: "profile-1" }] as Array<Record<string, unknown>>) : [],
243+
);
244+
mocks.resolveModelWithRegistry.mockImplementation(
245+
({ provider, modelId }: { provider: string; modelId: string }) => {
246+
if (provider !== "openai-codex") {
247+
return undefined;
248+
}
249+
if (modelId === "gpt-5.3-codex") {
250+
return {
251+
provider: "openai-codex",
252+
id: "gpt-5.3-codex",
253+
name: "GPT-5.3 Codex",
254+
api: "openai-codex-responses",
255+
baseUrl: "https://chatgpt.com/backend-api",
256+
input: ["text"],
257+
contextWindow: 272000,
258+
maxTokens: 128000,
259+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
260+
};
261+
}
262+
if (modelId === "gpt-5.4") {
263+
return {
264+
provider: "openai-codex",
265+
id: "gpt-5.4",
266+
name: "GPT-5.4",
267+
api: "openai-codex-responses",
268+
baseUrl: "https://chatgpt.com/backend-api",
269+
input: ["text"],
270+
contextWindow: 272000,
271+
maxTokens: 128000,
272+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
273+
};
274+
}
275+
return undefined;
276+
},
277+
);
278+
const runtime = { log: vi.fn(), error: vi.fn() };
279+
280+
await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never);
281+
282+
expect(mocks.printModelTable).toHaveBeenCalled();
283+
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{
284+
key: string;
285+
available: boolean;
286+
}>;
287+
288+
expect(rows).toEqual([
289+
expect.objectContaining({
290+
key: "openai-codex/gpt-5.3-codex",
291+
}),
292+
expect.objectContaining({
293+
key: "openai-codex/gpt-5.4",
294+
available: true,
295+
}),
296+
]);
297+
});
298+
299+
it("keeps discovered rows in --all output when catalog lookup is empty", async () => {
300+
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
301+
mocks.loadModelRegistry.mockResolvedValueOnce({
302+
models: [
303+
{
304+
provider: "openai-codex",
305+
id: "gpt-5.3-codex",
306+
name: "GPT-5.3 Codex",
307+
api: "openai-codex-responses",
308+
baseUrl: "https://chatgpt.com/backend-api",
309+
input: ["text"],
310+
contextWindow: 272000,
311+
maxTokens: 128000,
312+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
313+
},
314+
],
315+
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
316+
registry: {},
317+
});
318+
mocks.loadModelCatalog.mockResolvedValueOnce([]);
319+
const runtime = { log: vi.fn(), error: vi.fn() };
320+
321+
await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never);
322+
323+
expect(mocks.printModelTable).toHaveBeenCalled();
324+
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{ key: string }>;
325+
326+
expect(rows).toEqual([
327+
expect.objectContaining({
328+
key: "openai-codex/gpt-5.3-codex",
329+
}),
330+
]);
331+
});
332+
201333
it("exits with an error when configured-mode listing has no model registry", async () => {
202334
vi.clearAllMocks();
203335
const previousExitCode = process.exitCode;

src/commands/models/list.list-command.ts

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import type { Api, Model } from "@mariozechner/pi-ai";
22
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
3+
import { loadModelCatalog } from "../../agents/model-catalog.js";
34
import { parseModelRef } from "../../agents/model-selection.js";
45
import { resolveModelWithRegistry } from "../../agents/pi-embedded-runner/model.js";
56
import type { RuntimeEnv } from "../../runtime.js";
@@ -65,6 +66,7 @@ export async function modelsListCommand(
6566
const rows: ModelRow[] = [];
6667

6768
if (opts.all) {
69+
const seenKeys = new Set<string>();
6870
const sorted = [...models].toSorted((a, b) => {
6971
const p = a.provider.localeCompare(b.provider);
7072
if (p !== 0) {
@@ -93,6 +95,46 @@ export async function modelsListCommand(
9395
authStore,
9496
}),
9597
);
98+
seenKeys.add(key);
99+
}
100+
101+
if (modelRegistry) {
102+
const catalog = await loadModelCatalog({ config: cfg });
103+
for (const entry of catalog) {
104+
if (providerFilter && entry.provider.toLowerCase() !== providerFilter) {
105+
continue;
106+
}
107+
const key = modelKey(entry.provider, entry.id);
108+
if (seenKeys.has(key)) {
109+
continue;
110+
}
111+
const model = resolveModelWithRegistry({
112+
provider: entry.provider,
113+
modelId: entry.id,
114+
modelRegistry,
115+
cfg,
116+
});
117+
if (!model) {
118+
continue;
119+
}
120+
if (opts.local && !isLocalBaseUrl(model.baseUrl)) {
121+
continue;
122+
}
123+
const configured = configuredByKey.get(key);
124+
rows.push(
125+
toModelRow({
126+
model,
127+
key,
128+
tags: configured ? Array.from(configured.tags) : [],
129+
aliases: configured?.aliases ?? [],
130+
availableKeys,
131+
cfg,
132+
authStore,
133+
allowProviderAvailabilityFallback: !discoveredKeys.has(key),
134+
}),
135+
);
136+
seenKeys.add(key);
137+
}
96138
}
97139
} else {
98140
const registry = modelRegistry;

0 commit comments

Comments (0)