Skip to content

Commit ded6486

Browse files
feat: enable mergeToolResultText for all OpenAI-compatible providers (#10299)
1 parent 9b99890 commit ded6486

File tree

15 files changed

+41
-21
lines changed

15 files changed

+41
-21
lines changed

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -90,7 +90,13 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
9090
model,
9191
max_tokens,
9292
temperature,
93-
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
93+
// Enable mergeToolResultText to merge environment_details and other text content
94+
// after tool_results into the last tool message. This prevents reasoning/thinking
95+
// models from dropping reasoning_content when they see a user message after tool results.
96+
messages: [
97+
{ role: "system", content: systemPrompt },
98+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
99+
],
94100
stream: true,
95101
stream_options: { include_usage: true },
96102
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),

src/api/providers/cerebras.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -106,7 +106,7 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan
106106
supportsNativeTools && metadata?.tools && metadata.tools.length > 0 && metadata?.toolProtocol !== "xml"
107107

108108
// Convert Anthropic messages to OpenAI format (Cerebras is OpenAI-compatible)
109-
const openaiMessages = convertToOpenAiMessages(messages)
109+
const openaiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })
110110

111111
// Prepare request body following Cerebras API specification exactly
112112
const requestBody: Record<string, any> = {

src/api/providers/chutes.ts

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -44,7 +44,10 @@ export class ChutesHandler extends RouterProvider implements SingleCompletionHan
4444
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
4545
model,
4646
max_tokens,
47-
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
47+
messages: [
48+
{ role: "system", content: systemPrompt },
49+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
50+
],
4851
stream: true,
4952
stream_options: { include_usage: true },
5053
...(metadata?.tools && { tools: metadata.tools }),

src/api/providers/deepinfra.ts

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -72,7 +72,10 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletion
7272

7373
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
7474
model: modelId,
75-
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
75+
messages: [
76+
{ role: "system", content: systemPrompt },
77+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
78+
],
7679
stream: true,
7780
stream_options: { include_usage: true },
7881
reasoning_effort,

src/api/providers/featherless.ts

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -44,7 +44,10 @@ export class FeatherlessHandler extends BaseOpenAiCompatibleProvider<Featherless
4444
model,
4545
max_tokens,
4646
temperature,
47-
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
47+
messages: [
48+
{ role: "system", content: systemPrompt },
49+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
50+
],
4851
stream: true,
4952
stream_options: { include_usage: true },
5053
}

src/api/providers/huggingface.ts

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -56,7 +56,10 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
5656
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
5757
model: modelId,
5858
temperature,
59-
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
59+
messages: [
60+
{ role: "system", content: systemPrompt },
61+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
62+
],
6063
stream: true,
6164
stream_options: { include_usage: true },
6265
}

src/api/providers/lite-llm.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
4545
): ApiStream {
4646
const { id: modelId, info } = await this.fetchModel()
4747

48-
const openAiMessages = convertToOpenAiMessages(messages)
48+
const openAiMessages = convertToOpenAiMessages(messages, { mergeToolResultText: true })
4949

5050
// Prepare messages with cache control if enabled and supported
5151
let systemMessage: OpenAI.Chat.ChatCompletionMessageParam

src/api/providers/lm-studio.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -44,7 +44,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
4444
): ApiStream {
4545
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
4646
{ role: "system", content: systemPrompt },
47-
...convertToOpenAiMessages(messages),
47+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
4848
]
4949

5050
// LM Studio always supports native tools (https://lmstudio.ai/docs/developer/core/tools)

src/api/providers/openai.ts

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -126,7 +126,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
126126
}
127127
}
128128

129-
convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
129+
convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })]
130130

131131
if (modelInfo.supportsPromptCache) {
132132
// Note: the following logic is copied from openrouter:
@@ -234,7 +234,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
234234
? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
235235
: enabledLegacyFormat
236236
? [systemMessage, ...convertToSimpleMessages(messages)]
237-
: [systemMessage, ...convertToOpenAiMessages(messages)],
237+
: [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: true })],
238238
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
239239
...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
240240
...(metadata?.toolProtocol === "native" && {
@@ -349,7 +349,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
349349
role: "developer",
350350
content: `Formatting re-enabled\n${systemPrompt}`,
351351
},
352-
...convertToOpenAiMessages(messages),
352+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
353353
],
354354
stream: true,
355355
...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
@@ -386,7 +386,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
386386
role: "developer",
387387
content: `Formatting re-enabled\n${systemPrompt}`,
388388
},
389-
...convertToOpenAiMessages(messages),
389+
...convertToOpenAiMessages(messages, { mergeToolResultText: true }),
390390
],
391391
reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined,
392392
temperature: undefined,

src/api/providers/openrouter.ts

Lines changed: 6 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -229,13 +229,15 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
229229

230230
// Convert Anthropic messages to OpenAI format.
231231
// Pass normalization function for Mistral compatibility (requires 9-char alphanumeric IDs)
232+
// Enable mergeToolResultText to merge environment_details after tool_results into the last
233+
// tool message, preventing reasoning/thinking models from dropping reasoning_content.
232234
const isMistral = modelId.toLowerCase().includes("mistral")
233235
let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
234236
{ role: "system", content: systemPrompt },
235-
...convertToOpenAiMessages(
236-
messages,
237-
isMistral ? { normalizeToolCallId: normalizeMistralToolCallId } : undefined,
238-
),
237+
...convertToOpenAiMessages(messages, {
238+
mergeToolResultText: true,
239+
...(isMistral && { normalizeToolCallId: normalizeMistralToolCallId }),
240+
}),
239241
]
240242

241243
// DeepSeek highly recommends using user instead of system role.

0 commit comments

Comments (0)