@@ -3,21 +3,12 @@ import {
 	mainlandZAiModels,
 	internationalZAiDefaultModelId,
 	mainlandZAiDefaultModelId,
-	type InternationalZAiModelId,
-	type MainlandZAiModelId,
 	type ModelInfo,
 	ZAI_DEFAULT_TEMPERATURE,
 	zaiApiLineConfigs,
 } from "@roo-code/types"
 
-import { Anthropic } from "@anthropic-ai/sdk"
-import OpenAI from "openai"
-
 import type { ApiHandlerOptions } from "../../shared/api"
-import { getModelMaxOutputTokens } from "../../shared/api"
-import { convertToOpenAiMessages } from "../transform/openai-format"
-import type { ApiHandlerCreateMessageMetadata } from "../index"
-import { handleOpenAIError } from "./utils/openai-error-handler"
 
 import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
 
@@ -37,67 +28,4 @@ export class ZAiHandler extends BaseOpenAiCompatibleProvider<string> {
 			defaultTemperature: ZAI_DEFAULT_TEMPERATURE,
 		})
 	}
-
-	protected override createStream(
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-		metadata?: ApiHandlerCreateMessageMetadata,
-		requestOptions?: OpenAI.RequestOptions,
-	) {
-		const { id: model, info } = this.getModel()
-
-		// Centralized cap: clamp to 20% of the context window (unless provider-specific exceptions apply)
-		const max_tokens =
-			getModelMaxOutputTokens({
-				modelId: model,
-				model: info,
-				settings: this.options,
-				format: "openai",
-			}) ?? undefined
-
-		const temperature = this.options.modelTemperature ?? this.defaultTemperature
-
-		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
-			model,
-			max_tokens,
-			temperature,
-			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
-			stream: true,
-			stream_options: { include_usage: true },
-		}
-
-		// Add thinking parameter if reasoning is enabled and model supports it
-		const { id: modelId, info: modelInfo } = this.getModel()
-		if (this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary) {
-			;(params as any).thinking = { type: "enabled" }
-		}
-
-		try {
-			return this.client.chat.completions.create(params, requestOptions)
-		} catch (error) {
-			throw handleOpenAIError(error, this.providerName)
-		}
-	}
-
-	override async completePrompt(prompt: string): Promise<string> {
-		const { id: modelId } = this.getModel()
-
-		const params: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
-			model: modelId,
-			messages: [{ role: "user", content: prompt }],
-		}
-
-		// Add thinking parameter if reasoning is enabled and model supports it
-		const { info: modelInfo } = this.getModel()
-		if (this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary) {
-			;(params as any).thinking = { type: "enabled" }
-		}
-
-		try {
-			const response = await this.client.chat.completions.create(params)
-			return response.choices[0]?.message.content || ""
-		} catch (error) {
-			throw handleOpenAIError(error, this.providerName)
-		}
-	}
 }