Skip to content

Commit f624391

Browse files
yuweuii and jalehman authored
fix(models): use 1M context for openai-codex gpt-5.4 (#37876)
Merged via squash. Prepared head SHA: c410207 Co-authored-by: yuweuii <[email protected]> Co-authored-by: jalehman <[email protected]> Reviewed-by: @jalehman
1 parent b341580 commit f624391

File tree

6 files changed

+26
-15
lines changed

6 files changed

+26
-15
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ Docs: https://docs.openclaw.ai
4848
- Config/runtime snapshots: keep secrets-runtime-resolved config and auth-profile snapshots intact after config writes so follow-up reads still see file-backed secret values while picking up the persisted config update. (#37313) thanks @bbblending.
4949
- Gateway/Control UI: resolve bundled dashboard assets through symlinked global wrappers and auto-detected package roots, while keeping configured and custom roots on the strict hardlink boundary. (#40385) Thanks @LarytheLord.
5050
- Docs/Changelog: correct the contributor credit for the bundled Control UI global-install fix to @LarytheLord. (#40420) Thanks @velvet-shark.
51+
- Models/openai-codex GPT-5.4 forward-compat: use the GPT-5.4 1,050,000-token context window and 128,000 max tokens for `openai-codex/gpt-5.4` instead of inheriting stale legacy Codex limits in resolver fallbacks and model listing. (#37876) thanks @yuweuii.
5152

5253
## 2026.3.7
5354

src/agents/model-compat.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ describe("resolveForwardCompatModel", () => {
363363
expectResolvedForwardCompat(model, { provider: "openai-codex", id: "gpt-5.4" });
364364
expect(model?.api).toBe("openai-codex-responses");
365365
expect(model?.baseUrl).toBe("https://chatgpt.com/backend-api");
366-
expect(model?.contextWindow).toBe(272_000);
366+
expect(model?.contextWindow).toBe(1_050_000);
367367
expect(model?.maxTokens).toBe(128_000);
368368
});
369369

src/agents/model-forward-compat.ts

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@ const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
1212
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
1313

1414
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
15+
const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000;
16+
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
1517
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
1618
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
1719
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
@@ -123,9 +125,14 @@ function resolveOpenAICodexForwardCompatModel(
123125

124126
let templateIds: readonly string[];
125127
let eligibleProviders: Set<string>;
128+
let patch: Partial<Model<Api>> | undefined;
126129
if (lower === OPENAI_CODEX_GPT_54_MODEL_ID) {
127130
templateIds = OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS;
128131
eligibleProviders = CODEX_GPT54_ELIGIBLE_PROVIDERS;
132+
patch = {
133+
contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS,
134+
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
135+
};
129136
} else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) {
130137
templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS;
131138
eligibleProviders = CODEX_GPT53_ELIGIBLE_PROVIDERS;
@@ -146,6 +153,7 @@ function resolveOpenAICodexForwardCompatModel(
146153
...template,
147154
id: trimmedModelId,
148155
name: trimmedModelId,
156+
...patch,
149157
} as Model<Api>);
150158
}
151159

@@ -158,8 +166,8 @@ function resolveOpenAICodexForwardCompatModel(
158166
reasoning: true,
159167
input: ["text", "image"],
160168
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
161-
contextWindow: DEFAULT_CONTEXT_TOKENS,
162-
maxTokens: DEFAULT_CONTEXT_TOKENS,
169+
contextWindow: patch?.contextWindow ?? DEFAULT_CONTEXT_TOKENS,
170+
maxTokens: patch?.maxTokens ?? DEFAULT_CONTEXT_TOKENS,
163171
} as Model<Api>);
164172
}
165173

src/agents/pi-embedded-runner/model.test-harness.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,13 +36,14 @@ export function mockOpenAICodexTemplateModel(): void {
3636
export function buildOpenAICodexForwardCompatExpectation(
3737
id: string = "gpt-5.3-codex",
3838
): Partial<typeof OPENAI_CODEX_TEMPLATE_MODEL> & { provider: string; id: string } {
39+
const isGpt54 = id === "gpt-5.4";
3940
return {
4041
provider: "openai-codex",
4142
id,
4243
api: "openai-codex-responses",
4344
baseUrl: "https://chatgpt.com/backend-api",
4445
reasoning: true,
45-
contextWindow: 272000,
46+
contextWindow: isGpt54 ? 1_050_000 : 272000,
4647
maxTokens: 128000,
4748
};
4849
}

src/cli/daemon-cli/lifecycle.test.ts

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -36,16 +36,17 @@ const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"])
3636
const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]);
3737
const resolveGatewayPort = vi.fn(() => 18789);
3838
const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []);
39-
const probeGateway = vi.fn<
40-
(opts: {
41-
url: string;
42-
auth?: { token?: string; password?: string };
43-
timeoutMs: number;
44-
}) => Promise<{
45-
ok: boolean;
46-
configSnapshot: unknown;
47-
}>
48-
>();
39+
const probeGateway =
40+
vi.fn<
41+
(opts: {
42+
url: string;
43+
auth?: { token?: string; password?: string };
44+
timeoutMs: number;
45+
}) => Promise<{
46+
ok: boolean;
47+
configSnapshot: unknown;
48+
}>
49+
>();
4950
const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true);
5051
const loadConfig = vi.fn(() => ({}));
5152

src/commands/models/list.list-command.forward-compat.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ const OPENAI_CODEX_MODEL = {
77
api: "openai-codex-responses",
88
baseUrl: "https://chatgpt.com/backend-api",
99
input: ["text"],
10-
contextWindow: 272000,
10+
contextWindow: 1_050_000,
1111
maxTokens: 128000,
1212
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
1313
};

0 commit comments

Comments (0)