@@ -128,11 +128,64 @@ describe("OpenAiNativeHandler", () => {
128128 } )
129129
describe("completePrompt", () => {
	it("should handle non-streaming completion using Responses API", async () => {
		// Mock the responses.create method to return a non-streaming response
		mockResponsesCreate.mockResolvedValue({
			output: [
				{
					type: "message",
					content: [
						{
							type: "output_text",
							text: "This is the completion response",
						},
					],
				},
			],
		})

		const result = await handler.completePrompt("Test prompt")

		expect(result).toBe("This is the completion response")
		expect(mockResponsesCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				model: "gpt-4.1",
				stream: false,
				store: false,
				input: [
					{
						role: "user",
						content: [{ type: "input_text", text: "Test prompt" }],
					},
				],
			}),
		)
	})

	it("should handle SDK errors in completePrompt", async () => {
		// Mock SDK to throw an error
		mockResponsesCreate.mockRejectedValue(new Error("API Error"))

		await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
			"OpenAI Native completion error: API Error",
		)
	})

	it("should return empty string when no text in response", async () => {
		// Mock the responses.create method to return a response without text
		mockResponsesCreate.mockResolvedValue({
			output: [
				{
					type: "message",
					content: [],
				},
			],
		})

		const result = await handler.completePrompt("Test prompt")

		expect(result).toBe("")
	})
})
137190
138191 describe ( "getModel" , ( ) => {
@@ -1235,9 +1288,30 @@ describe("GPT-5 streaming event coverage (additional)", () => {
12351288 apiModelId : "codex-mini-latest" ,
12361289 } )
12371290
1238- // Codex Mini now uses the same Responses API as GPT-5, which doesn't support non-streaming
1239- await expect ( handler . completePrompt ( "Write a hello world function in Python" ) ) . rejects . toThrow (
1240- "completePrompt is not supported. Use createMessage (Responses API) instead." ,
// Mock the responses.create method to return a non-streaming response
mockResponsesCreate.mockResolvedValue({
	output: [
		{
			type: "message",
			content: [
				{
					type: "output_text",
					text: "def hello_world():\n print('Hello, World!')",
				},
			],
		},
	],
})

const result = await handler.completePrompt("Write a hello world function in Python")

expect(result).toBe("def hello_world():\n print('Hello, World!')")
expect(mockResponsesCreate).toHaveBeenCalledWith(
	expect.objectContaining({
		model: "codex-mini-latest",
		stream: false,
		store: false,
	}),
)
12421316 } )
12431317
0 commit comments