Skip to content

Commit 63f82e0

Browse files
0xsline
authored and steipete committed
fix: normalize openai-codex gpt-5.4 transport overrides
1 parent 3da8882 commit 63f82e0

File tree

3 files changed

+157
-38
lines changed

3 files changed

+157
-38
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
3333
- Hooks/session-memory: keep `/new` and `/reset` memory artifacts in the bound agent workspace and align saved reset session keys with that workspace when stale main-agent keys leak into the hook path. (#39875) thanks @rbutera.
3434
- Sessions/model switch: clear stale cached `contextTokens` when a session changes models so status and runtime paths recompute against the active model window. (#38044) thanks @yuweuii.
3535
- ACP/session history: persist transcripts for successful ACP child runs, preserve exact transcript text, record ACP spawned-session lineage, and keep spawn-time transcript-path persistence best-effort so history storage failures do not block execution. (#40137) thanks @mbelinky.
36+
- Agents/openai-codex: normalize `gpt-5.4` fallback transport back to `openai-codex-responses` on `chatgpt.com/backend-api` when config drifts to the generic OpenAI responses endpoint. (#38736) Thanks @0xsline.
3637
- Browser/CDP: normalize loopback direct WebSocket CDP URLs back to HTTP(S) for `/json/*` tab operations so local `ws://` / `wss://` profiles can still list, focus, open, and close tabs after the new direct-WS support lands. (#31085) Thanks @shrey150.
3738
- Browser/CDP: rewrite wildcard `ws://0.0.0.0` and `ws://[::]` debugger URLs from remote `/json/version` responses back to the external CDP host/port, fixing Browserless-style container endpoints. (#17760) Thanks @joeharouni.
3839
- Browser/extension relay: wait briefly for a previously attached Chrome tab to reappear after transient relay drops before failing with `tab not found`, reducing noisy reconnect flakes. (#32461) Thanks @AaronWander.

src/agents/pi-embedded-runner/model.test.ts

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -664,6 +664,60 @@ describe("resolveModel", () => {
664664
});
665665
});
666666

667+
it("normalizes openai-codex gpt-5.4 overrides away from /v1/responses", () => {
668+
mockOpenAICodexTemplateModel();
669+
670+
const cfg: OpenClawConfig = {
671+
models: {
672+
providers: {
673+
"openai-codex": {
674+
baseUrl: "https://api.openai.com/v1",
675+
api: "openai-responses",
676+
},
677+
},
678+
},
679+
} as unknown as OpenClawConfig;
680+
681+
expectResolvedForwardCompatFallback({
682+
provider: "openai-codex",
683+
id: "gpt-5.4",
684+
cfg,
685+
expectedModel: {
686+
api: "openai-codex-responses",
687+
baseUrl: "https://chatgpt.com/backend-api",
688+
id: "gpt-5.4",
689+
provider: "openai-codex",
690+
},
691+
});
692+
});
693+
694+
it("does not rewrite openai baseUrl when openai-codex api stays non-codex", () => {
695+
mockOpenAICodexTemplateModel();
696+
697+
const cfg: OpenClawConfig = {
698+
models: {
699+
providers: {
700+
"openai-codex": {
701+
baseUrl: "https://api.openai.com/v1",
702+
api: "openai-completions",
703+
},
704+
},
705+
},
706+
} as unknown as OpenClawConfig;
707+
708+
expectResolvedForwardCompatFallback({
709+
provider: "openai-codex",
710+
id: "gpt-5.4",
711+
cfg,
712+
expectedModel: {
713+
api: "openai-completions",
714+
baseUrl: "https://api.openai.com/v1",
715+
id: "gpt-5.4",
716+
provider: "openai-codex",
717+
},
718+
});
719+
});
720+
667721
it("includes auth hint for unknown ollama models (#17328)", () => {
668722
// resetMockDiscoverModels() in beforeEach already sets find → null
669723
const result = resolveModel("ollama", "gemma3:4b", "/tmp/agent");

src/agents/pi-embedded-runner/model.ts

Lines changed: 102 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ type InlineProviderConfig = {
2323
headers?: unknown;
2424
};
2525

26+
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
27+
2628
function sanitizeModelHeaders(
2729
headers: unknown,
2830
opts?: { stripSecretRefMarkers?: boolean },
@@ -43,6 +45,60 @@ function sanitizeModelHeaders(
4345
return Object.keys(next).length > 0 ? next : undefined;
4446
}
4547

48+
function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
49+
const trimmed = baseUrl?.trim();
50+
if (!trimmed) {
51+
return false;
52+
}
53+
return /^https?:\/\/api\.openai\.com(?:\/v1)?\/?$/i.test(trimmed);
54+
}
55+
56+
function isOpenAICodexBaseUrl(baseUrl?: string): boolean {
57+
const trimmed = baseUrl?.trim();
58+
if (!trimmed) {
59+
return false;
60+
}
61+
return /^https?:\/\/chatgpt\.com\/backend-api\/?$/i.test(trimmed);
62+
}
63+
64+
function normalizeOpenAICodexTransport(params: {
65+
provider: string;
66+
model: Model<Api>;
67+
}): Model<Api> {
68+
if (normalizeProviderId(params.provider) !== "openai-codex") {
69+
return params.model;
70+
}
71+
72+
const useCodexTransport =
73+
!params.model.baseUrl ||
74+
isOpenAIApiBaseUrl(params.model.baseUrl) ||
75+
isOpenAICodexBaseUrl(params.model.baseUrl);
76+
77+
const nextApi =
78+
useCodexTransport && params.model.api === "openai-responses"
79+
? ("openai-codex-responses" as const)
80+
: params.model.api;
81+
const nextBaseUrl =
82+
nextApi === "openai-codex-responses" &&
83+
(!params.model.baseUrl || isOpenAIApiBaseUrl(params.model.baseUrl))
84+
? OPENAI_CODEX_BASE_URL
85+
: params.model.baseUrl;
86+
87+
if (nextApi === params.model.api && nextBaseUrl === params.model.baseUrl) {
88+
return params.model;
89+
}
90+
91+
return {
92+
...params.model,
93+
api: nextApi,
94+
baseUrl: nextBaseUrl,
95+
} as Model<Api>;
96+
}
97+
98+
function normalizeResolvedModel(params: { provider: string; model: Model<Api> }): Model<Api> {
99+
return normalizeModelCompat(normalizeOpenAICodexTransport(params));
100+
}
101+
46102
export { buildModelAliasLines };
47103

48104
function resolveConfiguredProviderConfig(
@@ -145,13 +201,14 @@ export function resolveModelWithRegistry(params: {
145201
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
146202

147203
if (model) {
148-
return normalizeModelCompat(
149-
applyConfiguredProviderOverrides({
204+
return normalizeResolvedModel({
205+
provider,
206+
model: applyConfiguredProviderOverrides({
150207
discoveredModel: model,
151208
providerConfig,
152209
modelId,
153210
}),
154-
);
211+
});
155212
}
156213

157214
const providers = cfg?.models?.providers ?? {};
@@ -161,64 +218,71 @@ export function resolveModelWithRegistry(params: {
161218
(entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
162219
);
163220
if (inlineMatch?.api) {
164-
return normalizeModelCompat(inlineMatch as Model<Api>);
221+
return normalizeResolvedModel({ provider, model: inlineMatch as Model<Api> });
165222
}
166223

167224
// Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
168225
// Otherwise, configured providers can default to a generic API and break specific transports.
169226
const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
170227
if (forwardCompat) {
171-
return normalizeModelCompat(
172-
applyConfiguredProviderOverrides({
228+
return normalizeResolvedModel({
229+
provider,
230+
model: applyConfiguredProviderOverrides({
173231
discoveredModel: forwardCompat,
174232
providerConfig,
175233
modelId,
176234
}),
177-
);
235+
});
178236
}
179237

180238
// OpenRouter is a pass-through proxy - any model ID available on OpenRouter
181239
// should work without being pre-registered in the local catalog.
182240
if (normalizedProvider === "openrouter") {
183-
return normalizeModelCompat({
184-
id: modelId,
185-
name: modelId,
186-
api: "openai-completions",
241+
return normalizeResolvedModel({
187242
provider,
188-
baseUrl: "https://openrouter.ai/api/v1",
189-
reasoning: false,
190-
input: ["text"],
191-
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
192-
contextWindow: DEFAULT_CONTEXT_TOKENS,
193-
// Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
194-
maxTokens: 8192,
195-
} as Model<Api>);
243+
model: {
244+
id: modelId,
245+
name: modelId,
246+
api: "openai-completions",
247+
provider,
248+
baseUrl: "https://openrouter.ai/api/v1",
249+
reasoning: false,
250+
input: ["text"],
251+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
252+
contextWindow: DEFAULT_CONTEXT_TOKENS,
253+
// Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
254+
maxTokens: 8192,
255+
} as Model<Api>,
256+
});
196257
}
197258

198259
const configuredModel = providerConfig?.models?.find((candidate) => candidate.id === modelId);
199260
const providerHeaders = sanitizeModelHeaders(providerConfig?.headers);
200261
const modelHeaders = sanitizeModelHeaders(configuredModel?.headers);
201262
if (providerConfig || modelId.startsWith("mock-")) {
202-
return normalizeModelCompat({
203-
id: modelId,
204-
name: modelId,
205-
api: providerConfig?.api ?? "openai-responses",
263+
return normalizeResolvedModel({
206264
provider,
207-
baseUrl: providerConfig?.baseUrl,
208-
reasoning: configuredModel?.reasoning ?? false,
209-
input: ["text"],
210-
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
211-
contextWindow:
212-
configuredModel?.contextWindow ??
213-
providerConfig?.models?.[0]?.contextWindow ??
214-
DEFAULT_CONTEXT_TOKENS,
215-
maxTokens:
216-
configuredModel?.maxTokens ??
217-
providerConfig?.models?.[0]?.maxTokens ??
218-
DEFAULT_CONTEXT_TOKENS,
219-
headers:
220-
providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
221-
} as Model<Api>);
265+
model: {
266+
id: modelId,
267+
name: modelId,
268+
api: providerConfig?.api ?? "openai-responses",
269+
provider,
270+
baseUrl: providerConfig?.baseUrl,
271+
reasoning: configuredModel?.reasoning ?? false,
272+
input: ["text"],
273+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
274+
contextWindow:
275+
configuredModel?.contextWindow ??
276+
providerConfig?.models?.[0]?.contextWindow ??
277+
DEFAULT_CONTEXT_TOKENS,
278+
maxTokens:
279+
configuredModel?.maxTokens ??
280+
providerConfig?.models?.[0]?.maxTokens ??
281+
DEFAULT_CONTEXT_TOKENS,
282+
headers:
283+
providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
284+
} as Model<Api>,
285+
});
222286
}
223287

224288
return undefined;

0 commit comments

Comments (0)