Skip to content

Commit b8e2895

Browse files
authored
fix(app): support anthropic models on azure cognitive services (#8335)
1 parent: 6e028ec · commit: b8e2895

File tree

3 files changed

+67
-13
lines changed

3 files changed

+67
-13
lines changed

packages/opencode/src/provider/provider.ts

Lines changed: 21 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -586,6 +586,13 @@ export namespace Provider {
586586
})
587587
export type Info = z.infer<typeof Info>
588588

589+
export function isAzureAnthropic(model: Model): boolean {
590+
return (
591+
model.providerID === "azure-cognitive-services" &&
592+
(model.api.id.includes("claude") || model.api.id.includes("anthropic"))
593+
)
594+
}
595+
589596
function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model {
590597
const m: Model = {
591598
id: model.id,
@@ -1006,9 +1013,16 @@ export namespace Provider {
10061013
})
10071014
}
10081015

1009-
// Special case: google-vertex-anthropic uses a subpath import
1010-
const bundledKey =
1011-
model.providerID === "google-vertex-anthropic" ? "@ai-sdk/google-vertex/anthropic" : model.api.npm
1016+
// Special cases for providers that use different npm packages
1017+
if (isAzureAnthropic(model)) {
1018+
const resourceName = Env.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME")
1019+
if (resourceName) options["baseURL"] = `https://${resourceName}.services.ai.azure.com/anthropic/v1/`
1020+
}
1021+
const bundledKey = iife(() => {
1022+
if (model.providerID === "google-vertex-anthropic") return "@ai-sdk/google-vertex/anthropic"
1023+
if (isAzureAnthropic(model)) return "@ai-sdk/anthropic"
1024+
return model.api.npm
1025+
})
10121026
const bundledFn = BUNDLED_PROVIDERS[bundledKey]
10131027
if (bundledFn) {
10141028
log.info("using bundled provider", { providerID: model.providerID, pkg: bundledKey })
@@ -1074,8 +1088,11 @@ export namespace Provider {
10741088
const provider = s.providers[model.providerID]
10751089
const sdk = await getSDK(model)
10761090

1091+
// Skip custom model loader for Azure Anthropic models since they use @ai-sdk/anthropic
1092+
const useCustomLoader = s.modelLoaders[model.providerID] && !isAzureAnthropic(model)
1093+
10771094
try {
1078-
const language = s.modelLoaders[model.providerID]
1095+
const language = useCustomLoader
10791096
? await s.modelLoaders[model.providerID](sdk, model.api.id, provider.options)
10801097
: sdk.languageModel(model.api.id)
10811098
s.models.set(key, language)

packages/opencode/src/provider/transform.ts

Lines changed: 45 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -16,6 +16,17 @@ function mimeToModality(mime: string): Modality | undefined {
1616
}
1717

1818
export namespace ProviderTransform {
19+
function isAzureAnthropic(model: Provider.Model): boolean {
20+
return (
21+
model.providerID === "azure-cognitive-services" &&
22+
(model.api.id.includes("claude") || model.api.id.includes("anthropic"))
23+
)
24+
}
25+
26+
function usesAnthropicSDK(model: Provider.Model): boolean {
27+
return model.api.npm === "@ai-sdk/anthropic" || isAzureAnthropic(model)
28+
}
29+
1930
function normalizeMessages(
2031
msgs: ModelMessage[],
2132
model: Provider.Model,
@@ -50,7 +61,7 @@ export namespace ProviderTransform {
5061

5162
// Anthropic rejects messages with empty content - filter out empty string messages
5263
// and remove empty text/reasoning parts from array content
53-
if (model.api.npm === "@ai-sdk/anthropic") {
64+
if (usesAnthropicSDK(model)) {
5465
msgs = msgs
5566
.map((msg) => {
5667
if (typeof msg.content === "string") {
@@ -256,7 +267,7 @@ export namespace ProviderTransform {
256267
model.providerID === "anthropic" ||
257268
model.api.id.includes("anthropic") ||
258269
model.api.id.includes("claude") ||
259-
model.api.npm === "@ai-sdk/anthropic"
270+
usesAnthropicSDK(model)
260271
) {
261272
msgs = applyCaching(msgs, model.providerID)
262273
}
@@ -308,6 +319,23 @@ export namespace ProviderTransform {
308319
const id = model.id.toLowerCase()
309320
if (id.includes("deepseek") || id.includes("minimax") || id.includes("glm") || id.includes("mistral")) return {}
310321

322+
if (isAzureAnthropic(model)) {
323+
return {
324+
high: {
325+
thinking: {
326+
type: "enabled",
327+
budgetTokens: 16000,
328+
},
329+
},
330+
max: {
331+
thinking: {
332+
type: "enabled",
333+
budgetTokens: 31999,
334+
},
335+
},
336+
}
337+
}
338+
311339
switch (model.api.npm) {
312340
case "@openrouter/ai-sdk-provider":
313341
if (!model.id.includes("gpt") && !model.id.includes("gemini-3") && !model.id.includes("grok-4")) return {}
@@ -578,6 +606,9 @@ export namespace ProviderTransform {
578606
}
579607

580608
export function providerOptions(model: Provider.Model, options: { [x: string]: any }) {
609+
if (isAzureAnthropic(model)) {
610+
return { ["anthropic" as string]: options }
611+
}
581612
switch (model.api.npm) {
582613
case "@ai-sdk/github-copilot":
583614
case "@ai-sdk/openai":
@@ -613,16 +644,27 @@ export namespace ProviderTransform {
613644
}
614645
}
615646

647+
export function maxOutputTokens(model: Provider.Model, options: Record<string, any>, globalLimit: number): number
616648
export function maxOutputTokens(
617649
npm: string,
618650
options: Record<string, any>,
619651
modelLimit: number,
620652
globalLimit: number,
653+
): number
654+
export function maxOutputTokens(
655+
arg1: Provider.Model | string,
656+
options: Record<string, any>,
657+
arg3: number,
658+
arg4?: number,
621659
): number {
660+
const model = typeof arg1 === "object" ? arg1 : null
661+
const npm = model ? model.api.npm : (arg1 as string)
662+
const modelLimit = model ? model.limit.output : arg3
663+
const globalLimit = model ? arg3 : arg4!
622664
const modelCap = modelLimit || globalLimit
623665
const standardLimit = Math.min(modelCap, globalLimit)
624666

625-
if (npm === "@ai-sdk/anthropic") {
667+
if (model ? usesAnthropicSDK(model) : npm === "@ai-sdk/anthropic") {
626668
const thinking = options?.["thinking"]
627669
const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
628670
const enabled = thinking?.["type"] === "enabled"

packages/opencode/src/session/llm.ts

Lines changed: 1 addition & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -133,12 +133,7 @@ export namespace LLM {
133133

134134
const maxOutputTokens = isCodex
135135
? undefined
136-
: ProviderTransform.maxOutputTokens(
137-
input.model.api.npm,
138-
params.options,
139-
input.model.limit.output,
140-
OUTPUT_TOKEN_MAX,
141-
)
136+
: ProviderTransform.maxOutputTokens(input.model, params.options, OUTPUT_TOKEN_MAX)
142137

143138
const tools = await resolveTools(input)
144139

0 commit comments

Comments (0)