Skip to content

Commit 0839823

Browse files
author
mklinger
committed
feat(models): add azure realtime and forward-compat updates
1 parent 8a3d6a6 commit 0839823

14 files changed: +246 −6 lines changed

src/agents/live-model-filter.ts

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ const ANTHROPIC_PREFIXES = [
1010
"claude-sonnet-4-5",
1111
"claude-haiku-4-5",
1212
];
13-
const OPENAI_MODELS = ["gpt-5.4", "gpt-5.2", "gpt-5.0"];
13+
const OPENAI_MODELS = ["gpt-5.4", "gpt-5.2", "gpt-5.0", "gpt-realtime-1.5", "gpt-realtime-mini"];
1414
const CODEX_MODELS = [
1515
"gpt-5.4",
1616
"gpt-5.2",
@@ -35,8 +35,12 @@ function matchesExactOrPrefix(id: string, values: string[]): boolean {
3535
}
3636

3737
export function isModernModelRef(ref: ModelRef): boolean {
38-
const provider = ref.provider?.trim().toLowerCase() ?? "";
38+
const providerRaw = ref.provider?.trim().toLowerCase() ?? "";
3939
const id = ref.id?.trim().toLowerCase() ?? "";
40+
const provider =
41+
providerRaw === "azure-openai-responses" || providerRaw === "azure-openai-completions"
42+
? "openai"
43+
: providerRaw;
4044
if (!provider || !id) {
4145
return false;
4246
}

src/agents/model-catalog.test.ts

Lines changed: 47 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,23 @@ describe("loadModelCatalog", () => {
4141
expect(first).toEqual([]);
4242

4343
const second = await loadModelCatalog({ config: cfg });
44-
expect(second).toEqual([{ id: "gpt-4.1", name: "GPT-4.1", provider: "openai" }]);
44+
expect(second).toContainEqual(
45+
expect.objectContaining({ id: "gpt-4.1", name: "GPT-4.1", provider: "openai" }),
46+
);
47+
expect(second).toContainEqual(
48+
expect.objectContaining({
49+
id: "gpt-realtime-1.5",
50+
name: "gpt-realtime-1.5",
51+
provider: "openai",
52+
}),
53+
);
54+
expect(second).toContainEqual(
55+
expect.objectContaining({
56+
id: "gpt-realtime-mini",
57+
name: "gpt-realtime-mini",
58+
provider: "openai",
59+
}),
60+
);
4561
expect(getCallCount()).toBe(2);
4662
expect(warnSpy).toHaveBeenCalledTimes(1);
4763
} finally {
@@ -216,6 +232,36 @@ describe("loadModelCatalog", () => {
216232
);
217233
});
218234

235+
it("adds realtime forward-compat catalog entries when template models exist", async () => {
236+
mockPiDiscoveryModels([
237+
{
238+
id: "gpt-5.2",
239+
provider: "openai",
240+
name: "GPT-5.2",
241+
reasoning: true,
242+
contextWindow: 1_050_000,
243+
input: ["text", "image"],
244+
},
245+
]);
246+
247+
const result = await loadModelCatalog({ config: {} as OpenClawConfig });
248+
249+
expect(result).toContainEqual(
250+
expect.objectContaining({
251+
provider: "openai",
252+
id: "gpt-realtime-1.5",
253+
name: "gpt-realtime-1.5",
254+
}),
255+
);
256+
expect(result).toContainEqual(
257+
expect.objectContaining({
258+
provider: "openai",
259+
id: "gpt-realtime-mini",
260+
name: "gpt-realtime-mini",
261+
}),
262+
);
263+
});
264+
219265
it("merges configured models for opted-in non-pi-native providers", async () => {
220266
mockSingleOpenAiCatalogModel();
221267

src/agents/model-catalog.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,8 @@ const CODEX_PROVIDER = "openai-codex";
3737
const OPENAI_PROVIDER = "openai";
3838
const OPENAI_GPT54_MODEL_ID = "gpt-5.4";
3939
const OPENAI_GPT54_PRO_MODEL_ID = "gpt-5.4-pro";
40+
const OPENAI_GPT_REALTIME_15_MODEL_ID = "gpt-realtime-1.5";
41+
const OPENAI_GPT_REALTIME_MINI_MODEL_ID = "gpt-realtime-mini";
4042
const OPENAI_CODEX_GPT53_MODEL_ID = "gpt-5.3-codex";
4143
const OPENAI_CODEX_GPT53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
4244
const OPENAI_CODEX_GPT54_MODEL_ID = "gpt-5.4";
@@ -59,6 +61,16 @@ const SYNTHETIC_CATALOG_FALLBACKS: readonly SyntheticCatalogFallback[] = [
5961
id: OPENAI_GPT54_PRO_MODEL_ID,
6062
templateIds: ["gpt-5.2-pro", "gpt-5.2"],
6163
},
64+
{
65+
provider: OPENAI_PROVIDER,
66+
id: OPENAI_GPT_REALTIME_15_MODEL_ID,
67+
templateIds: ["gpt-5.2", "gpt-4.1"],
68+
},
69+
{
70+
provider: OPENAI_PROVIDER,
71+
id: OPENAI_GPT_REALTIME_MINI_MODEL_ID,
72+
templateIds: ["gpt-5.2", "gpt-4.1"],
73+
},
6274
{
6375
provider: CODEX_PROVIDER,
6476
id: OPENAI_CODEX_GPT54_MODEL_ID,

src/agents/model-compat.test.ts

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -304,6 +304,16 @@ describe("isModernModelRef", () => {
304304
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4" })).toBe(true);
305305
});
306306

307+
it("includes Azure OpenAI gpt-5.4 variants in modern selection", () => {
308+
expect(isModernModelRef({ provider: "azure-openai-responses", id: "gpt-5.4" })).toBe(true);
309+
expect(isModernModelRef({ provider: "azure-openai-completions", id: "gpt-5.4" })).toBe(true);
310+
});
311+
312+
it("includes OpenAI realtime variants in modern selection", () => {
313+
expect(isModernModelRef({ provider: "openai", id: "gpt-realtime-1.5" })).toBe(true);
314+
expect(isModernModelRef({ provider: "openai", id: "gpt-realtime-mini" })).toBe(true);
315+
});
316+
307317
it("excludes opencode minimax variants from modern selection", () => {
308318
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
309319
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
@@ -373,6 +383,30 @@ describe("resolveForwardCompatModel", () => {
373383
expect(model?.maxTokens).toBe(128_000);
374384
});
375385

386+
it("resolves openai gpt-realtime-1.5 via template fallback", () => {
387+
const registry = createRegistry({
388+
"openai/gpt-5.2": createOpenAITemplateModel("gpt-5.2"),
389+
});
390+
const model = resolveForwardCompatModel("openai", "gpt-realtime-1.5", registry);
391+
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-realtime-1.5" });
392+
expect(model?.api).toBe("openai-responses");
393+
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
394+
expect(model?.reasoning).toBe(false);
395+
expect(model?.input).toEqual(["text"]);
396+
});
397+
398+
it("resolves openai gpt-realtime-mini via template fallback", () => {
399+
const registry = createRegistry({
400+
"openai/gpt-5.2": createOpenAITemplateModel("gpt-5.2"),
401+
});
402+
const model = resolveForwardCompatModel("openai", "gpt-realtime-mini", registry);
403+
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-realtime-mini" });
404+
expect(model?.api).toBe("openai-responses");
405+
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
406+
expect(model?.reasoning).toBe(false);
407+
expect(model?.input).toEqual(["text"]);
408+
});
409+
376410
it("resolves anthropic opus 4.6 via 4.5 template", () => {
377411
const registry = createRegistry({
378412
"anthropic/claude-opus-4-5": createTemplateModel("anthropic", "claude-opus-4-5"),

src/agents/model-forward-compat.ts

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,11 @@ const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
1010
const OPENAI_GPT_54_MAX_TOKENS = 128_000;
1111
const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
1212
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
13+
const OPENAI_GPT_REALTIME_15_MODEL_ID = "gpt-realtime-1.5";
14+
const OPENAI_GPT_REALTIME_MINI_MODEL_ID = "gpt-realtime-mini";
15+
const OPENAI_GPT_REALTIME_CONTEXT_TOKENS = 128_000;
16+
const OPENAI_GPT_REALTIME_MAX_TOKENS = 16_384;
17+
const OPENAI_GPT_REALTIME_TEMPLATE_MODEL_IDS = ["gpt-5.2", "gpt-4.1"] as const;
1318

1419
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
1520
const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000;
@@ -91,6 +96,53 @@ function resolveOpenAIGpt54ForwardCompatModel(
9196
);
9297
}
9398

99+
function resolveOpenAIRealtimeForwardCompatModel(
100+
provider: string,
101+
modelId: string,
102+
modelRegistry: ModelRegistry,
103+
): Model<Api> | undefined {
104+
const normalizedProvider = normalizeProviderId(provider);
105+
if (normalizedProvider !== "openai") {
106+
return undefined;
107+
}
108+
109+
const trimmedModelId = modelId.trim();
110+
const lower = trimmedModelId.toLowerCase();
111+
if (lower !== OPENAI_GPT_REALTIME_15_MODEL_ID && lower !== OPENAI_GPT_REALTIME_MINI_MODEL_ID) {
112+
return undefined;
113+
}
114+
115+
return (
116+
cloneFirstTemplateModel({
117+
normalizedProvider,
118+
trimmedModelId,
119+
templateIds: [...OPENAI_GPT_REALTIME_TEMPLATE_MODEL_IDS],
120+
modelRegistry,
121+
patch: {
122+
api: "openai-responses",
123+
provider: normalizedProvider,
124+
baseUrl: "https://api.openai.com/v1",
125+
reasoning: false,
126+
input: ["text"],
127+
contextWindow: OPENAI_GPT_REALTIME_CONTEXT_TOKENS,
128+
maxTokens: OPENAI_GPT_REALTIME_MAX_TOKENS,
129+
},
130+
}) ??
131+
normalizeModelCompat({
132+
id: trimmedModelId,
133+
name: trimmedModelId,
134+
api: "openai-responses",
135+
provider: normalizedProvider,
136+
baseUrl: "https://api.openai.com/v1",
137+
reasoning: false,
138+
input: ["text"],
139+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
140+
contextWindow: OPENAI_GPT_REALTIME_CONTEXT_TOKENS,
141+
maxTokens: OPENAI_GPT_REALTIME_MAX_TOKENS,
142+
} as Model<Api>)
143+
);
144+
}
145+
94146
function cloneFirstTemplateModel(params: {
95147
normalizedProvider: string;
96148
trimmedModelId: string;
@@ -348,6 +400,7 @@ export function resolveForwardCompatModel(
348400
): Model<Api> | undefined {
349401
return (
350402
resolveOpenAIGpt54ForwardCompatModel(provider, modelId, modelRegistry) ??
403+
resolveOpenAIRealtimeForwardCompatModel(provider, modelId, modelRegistry) ??
351404
resolveOpenAICodexForwardCompatModel(provider, modelId, modelRegistry) ??
352405
resolveAnthropicOpus46ForwardCompatModel(provider, modelId, modelRegistry) ??
353406
resolveAnthropicSonnet46ForwardCompatModel(provider, modelId, modelRegistry) ??

src/agents/pi-embedded-runner/google.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -564,7 +564,9 @@ export async function sanitizeSessionHistory(params: {
564564
);
565565

566566
const isOpenAIResponsesApi =
567-
params.modelApi === "openai-responses" || params.modelApi === "openai-codex-responses";
567+
params.modelApi === "openai-responses" ||
568+
params.modelApi === "azure-openai-responses" ||
569+
params.modelApi === "openai-codex-responses";
568570
const hasSnapshot = Boolean(params.provider || params.modelApi || params.modelId);
569571
const priorSnapshot = hasSnapshot ? readLastModelSnapshot(params.sessionManager) : null;
570572
const modelChanged = priorSnapshot

src/agents/pi-embedded-runner/model.forward-compat.test.ts

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,68 @@ describe("pi embedded model e2e smoke", () => {
5858
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
5959
});
6060

61+
it("builds an openai forward-compat fallback for gpt-realtime-1.5", () => {
62+
mockDiscoveredModel({
63+
provider: "openai",
64+
modelId: "gpt-5.2",
65+
templateModel: {
66+
id: "gpt-5.2",
67+
name: "gpt-5.2",
68+
provider: "openai",
69+
api: "openai-responses",
70+
baseUrl: "https://api.openai.com/v1",
71+
input: ["text", "image"],
72+
reasoning: true,
73+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
74+
contextWindow: 400_000,
75+
maxTokens: 32_768,
76+
},
77+
});
78+
79+
const result = resolveModel("openai", "gpt-realtime-1.5", "/tmp/agent");
80+
expect(result.error).toBeUndefined();
81+
expect(result.model).toMatchObject({
82+
provider: "openai",
83+
id: "gpt-realtime-1.5",
84+
name: "gpt-realtime-1.5",
85+
api: "openai-responses",
86+
baseUrl: "https://api.openai.com/v1",
87+
reasoning: false,
88+
input: ["text"],
89+
});
90+
});
91+
92+
it("builds an openai forward-compat fallback for gpt-realtime-mini", () => {
93+
mockDiscoveredModel({
94+
provider: "openai",
95+
modelId: "gpt-5.2",
96+
templateModel: {
97+
id: "gpt-5.2",
98+
name: "gpt-5.2",
99+
provider: "openai",
100+
api: "openai-responses",
101+
baseUrl: "https://api.openai.com/v1",
102+
input: ["text", "image"],
103+
reasoning: true,
104+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
105+
contextWindow: 400_000,
106+
maxTokens: 32_768,
107+
},
108+
});
109+
110+
const result = resolveModel("openai", "gpt-realtime-mini", "/tmp/agent");
111+
expect(result.error).toBeUndefined();
112+
expect(result.model).toMatchObject({
113+
provider: "openai",
114+
id: "gpt-realtime-mini",
115+
name: "gpt-realtime-mini",
116+
api: "openai-responses",
117+
baseUrl: "https://api.openai.com/v1",
118+
reasoning: false,
119+
input: ["text"],
120+
});
121+
});
122+
61123
it("builds an openai-codex forward-compat fallback for gpt-5.3-codex-spark", () => {
62124
mockOpenAICodexTemplateModel();
63125

src/agents/pi-embedded-runner/openai-stream-wrappers.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ import { log } from "./logger.js";
66
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
77
type OpenAIReasoningEffort = "low" | "medium" | "high";
88

9-
const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]);
9+
const OPENAI_RESPONSES_APIS = new Set(["openai-responses", "azure-openai-responses"]);
1010
const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]);
1111

1212
function isDirectOpenAIBaseUrl(baseUrl: unknown): boolean {

src/agents/pi-embedded-runner/run/attempt.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1998,6 +1998,7 @@ export async function runEmbeddedAttempt(
19981998

19991999
if (
20002000
params.model.api === "openai-responses" ||
2001+
params.model.api === "azure-openai-responses" ||
20012002
params.model.api === "openai-codex-responses"
20022003
) {
20032004
const inner = activeSession.agent.streamFn;

src/agents/transcript-policy.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ const OPENAI_MODEL_APIS = new Set([
3535
"openai",
3636
"openai-completions",
3737
"openai-responses",
38+
"azure-openai-responses",
3839
"openai-codex-responses",
3940
]);
4041

0 commit comments

Comments (0)