Commit 3b468dd

Move zai into the base provider
1 parent 4720455 commit 3b468dd

2 files changed: +17 −77 lines

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 17 additions & 5 deletions
@@ -95,6 +95,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
             ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
         }

+        // Add thinking parameter if reasoning is enabled and model supports it
+        if (this.options.enableReasoningEffort && info.supportsReasoningBinary) {
+            ;(params as any).thinking = { type: "enabled" }
+        }
+
         try {
             return this.client.chat.completions.create(params, requestOptions)
         } catch (error) {
@@ -219,13 +224,20 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
     }

     async completePrompt(prompt: string): Promise<string> {
-        const { id: modelId } = this.getModel()
+        const { id: modelId, info: modelInfo } = this.getModel()
+
+        const params: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
+            model: modelId,
+            messages: [{ role: "user", content: prompt }],
+        }
+
+        // Add thinking parameter if reasoning is enabled and model supports it
+        if (this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary) {
+            ;(params as any).thinking = { type: "enabled" }
+        }

         try {
-            const response = await this.client.chat.completions.create({
-                model: modelId,
-                messages: [{ role: "user", content: prompt }],
-            })
+            const response = await this.client.chat.completions.create(params)

             // Check for provider-specific error responses (e.g., MiniMax base_resp)
             const responseAny = response as any
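
For orientation, a minimal TypeScript sketch (not part of this commit) of the condition the base provider now applies before every request. The withThinking helper and the loose Record typing are illustrative stand-ins for the inline logic and OpenAI's ChatCompletionCreateParams types shown in the diff above:

    type ReasoningOptions = { enableReasoningEffort?: boolean }
    type ReasoningModelInfo = { supportsReasoningBinary?: boolean }

    // Mirrors the diff: add the flag only when the user enabled reasoning
    // AND the model's info declares binary "thinking" support.
    function withThinking(
        params: Record<string, unknown>,
        options: ReasoningOptions,
        info: ReasoningModelInfo,
    ): Record<string, unknown> {
        return options.enableReasoningEffort && info.supportsReasoningBinary
            ? { ...params, thinking: { type: "enabled" } }
            : params
    }

    // Example: with both flags set, the request body gains the toggle.
    const body = withThinking(
        { model: "glm-4.6", messages: [{ role: "user", content: "Hi" }] }, // placeholder model id
        { enableReasoningEffort: true },
        { supportsReasoningBinary: true },
    )
    // body now carries thinking: { type: "enabled" }

The real code mutates params in place through an any cast rather than returning a copy; the effect on the outgoing request is the same.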

src/api/providers/zai.ts

Lines changed: 0 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -3,21 +3,12 @@ import {
33
mainlandZAiModels,
44
internationalZAiDefaultModelId,
55
mainlandZAiDefaultModelId,
6-
type InternationalZAiModelId,
7-
type MainlandZAiModelId,
86
type ModelInfo,
97
ZAI_DEFAULT_TEMPERATURE,
108
zaiApiLineConfigs,
119
} from "@roo-code/types"
1210

13-
import { Anthropic } from "@anthropic-ai/sdk"
14-
import OpenAI from "openai"
15-
1611
import type { ApiHandlerOptions } from "../../shared/api"
17-
import { getModelMaxOutputTokens } from "../../shared/api"
18-
import { convertToOpenAiMessages } from "../transform/openai-format"
19-
import type { ApiHandlerCreateMessageMetadata } from "../index"
20-
import { handleOpenAIError } from "./utils/openai-error-handler"
2112

2213
import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
2314

@@ -37,67 +28,4 @@ export class ZAiHandler extends BaseOpenAiCompatibleProvider<string> {
3728
defaultTemperature: ZAI_DEFAULT_TEMPERATURE,
3829
})
3930
}
40-
41-
protected override createStream(
42-
systemPrompt: string,
43-
messages: Anthropic.Messages.MessageParam[],
44-
metadata?: ApiHandlerCreateMessageMetadata,
45-
requestOptions?: OpenAI.RequestOptions,
46-
) {
47-
const { id: model, info } = this.getModel()
48-
49-
// Centralized cap: clamp to 20% of the context window (unless provider-specific exceptions apply)
50-
const max_tokens =
51-
getModelMaxOutputTokens({
52-
modelId: model,
53-
model: info,
54-
settings: this.options,
55-
format: "openai",
56-
}) ?? undefined
57-
58-
const temperature = this.options.modelTemperature ?? this.defaultTemperature
59-
60-
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
61-
model,
62-
max_tokens,
63-
temperature,
64-
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
65-
stream: true,
66-
stream_options: { include_usage: true },
67-
}
68-
69-
// Add thinking parameter if reasoning is enabled and model supports it
70-
const { id: modelId, info: modelInfo } = this.getModel()
71-
if (this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary) {
72-
;(params as any).thinking = { type: "enabled" }
73-
}
74-
75-
try {
76-
return this.client.chat.completions.create(params, requestOptions)
77-
} catch (error) {
78-
throw handleOpenAIError(error, this.providerName)
79-
}
80-
}
81-
82-
override async completePrompt(prompt: string): Promise<string> {
83-
const { id: modelId } = this.getModel()
84-
85-
const params: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
86-
model: modelId,
87-
messages: [{ role: "user", content: prompt }],
88-
}
89-
90-
// Add thinking parameter if reasoning is enabled and model supports it
91-
const { info: modelInfo } = this.getModel()
92-
if (this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary) {
93-
;(params as any).thinking = { type: "enabled" }
94-
}
95-
96-
try {
97-
const response = await this.client.chat.completions.create(params)
98-
return response.choices[0]?.message.content || ""
99-
} catch (error) {
100-
throw handleOpenAIError(error, this.providerName)
101-
}
102-
}
10331
}
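
With both overrides deleted, ZAiHandler also inherits the streaming path from the base class. A hedged sketch of the streaming params the base provider would now assemble for a reasoning-capable Z.AI model; the model id, token cap, and temperature below are placeholders, not values from this diff:

    // Illustrative streaming request for a model whose info sets
    // supportsReasoningBinary, with enableReasoningEffort turned on.
    const exampleStreamParams = {
        model: "glm-4.6", // placeholder model id
        max_tokens: 8192, // placeholder; capped centrally via getModelMaxOutputTokens
        temperature: 0.6, // placeholder; falls back to ZAI_DEFAULT_TEMPERATURE
        messages: [{ role: "system" as const, content: "You are a helpful assistant." }],
        stream: true as const,
        stream_options: { include_usage: true },
        thinking: { type: "enabled" as const }, // injected by the base provider
    }

The deleted override built essentially this shape by hand, which is why it could be removed once the base class learned the thinking flag.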
