@@ -885,4 +885,136 @@ describe("getModelParams", () => {
 			expect(result.reasoningBudget).toBe(8192) // Default thinking tokens
 		})
 	})
+
+	describe("GPT-5 temperature hardcoding", () => {
+		it("should hardcode temperature to 1 for gpt-5 models", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-2025-08-07",
+				format: "openai" as const,
+				settings: { modelTemperature: 0.5 }, // User setting should be overridden
+				model,
+			})
+
+			expect(result.temperature).toBe(1)
+		})
+
+		it("should hardcode temperature to 1 for gpt-5-mini models", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-mini-2025-08-07",
+				format: "openai" as const,
+				settings: { modelTemperature: 0.7 }, // User setting should be overridden
+				model,
+			})
+
+			expect(result.temperature).toBe(1)
+		})
+
+		it("should hardcode temperature to 1 for gpt-5-nano models", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-nano-2025-08-07",
+				format: "openai" as const,
+				settings: { modelTemperature: 0.3 }, // User setting should be overridden
+				model,
+			})
+
+			expect(result.temperature).toBe(1)
+		})
+
+		it("should hardcode temperature to 1 even when no temperature is specified for gpt-5", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-2025-08-07",
+				format: "openai" as const,
+				settings: {}, // No temperature specified
+				model,
+				defaultTemperature: 0.2, // Default should also be overridden
+			})
+
+			expect(result.temperature).toBe(1)
+		})
+
+		it("should not hardcode temperature for non-gpt-5 openai models", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-4-turbo",
+				format: "openai" as const,
+				settings: { modelTemperature: 0.5 },
+				model,
+			})
+
+			expect(result.temperature).toBe(0.5)
+		})
+
+		it("should not hardcode temperature for gpt-5 models in non-openai format", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-2025-08-07",
+				format: "openrouter" as const,
+				settings: { modelTemperature: 0.5 },
+				model,
+			})
+
+			expect(result.temperature).toBe(0.5) // Should not be hardcoded for openrouter
+		})
+
+		it("should hardcode temperature for gpt-5 with reasoning effort", () => {
+			const model: ModelInfo = {
+				...baseModel,
+				supportsReasoningEffort: true,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-2025-08-07",
+				format: "openai" as const,
+				settings: {
+					modelTemperature: 0.5,
+					reasoningEffort: "high",
+				},
+				model,
+			})
+
+			expect(result.temperature).toBe(1)
+			expect(result.reasoningEffort).toBe("high")
+		})
+
+		it("should hardcode temperature for gpt-5 with verbosity settings", () => {
+			const model: ModelInfo = {
+				...baseModel,
+			}
+
+			const result = getModelParams({
+				modelId: "gpt-5-2025-08-07",
+				format: "openai" as const,
+				settings: {
+					modelTemperature: 0.5,
+					verbosity: "high",
+				},
+				model,
+			})
+
+			expect(result.temperature).toBe(1)
+			expect(result.verbosity).toBe("high")
+		})
+	})
 })
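
The diff above only shows the tests, not the implementation under test. For orientation, here is a minimal sketch of the temperature-resolution behavior these tests assert, assuming the check lives inside `getModelParams` and keys off the model ID prefix and the `"openai"` format. The names `GPT5_TEMPERATURE`, `isGpt5Model`, and `resolveTemperature` are illustrative, not the actual identifiers in the source.

```typescript
// Illustrative sketch only; the real logic is inside getModelParams.
const GPT5_TEMPERATURE = 1

function isGpt5Model(modelId: string): boolean {
	// Matches "gpt-5-2025-08-07", "gpt-5-mini-...", "gpt-5-nano-...",
	// but not "gpt-4-turbo".
	return modelId.startsWith("gpt-5")
}

function resolveTemperature(params: {
	modelId: string
	format: string
	settings: { modelTemperature?: number }
	defaultTemperature?: number
}): number | undefined {
	const { modelId, format, settings, defaultTemperature } = params

	// GPT-5 models reject non-default temperatures on the OpenAI API, so both
	// the user setting and the provided default are overridden with 1 when the
	// format is "openai". Other formats (e.g. "openrouter") are left alone.
	if (format === "openai" && isGpt5Model(modelId)) {
		return GPT5_TEMPERATURE
	}

	return settings.modelTemperature ?? defaultTemperature
}
```

The "not hardcoded" cases follow from the same guard: a `gpt-4-turbo` ID fails the prefix check, and a `gpt-5` ID routed through `"openrouter"` fails the format check, so both fall through to the user-supplied temperature.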