Skip to content

Commit 44fb382

Browse files
feat(api): remove prompt_cache_key param from responses, phase field from message types
1 parent 656e3ca commit 44fb382

10 files changed

Lines changed: 5 additions & 58 deletions

File tree

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 148
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6bfe886b5ded0fe3bf37ca672698814e16e0836a093ceef65dac37ae44d1ad6b.yml
3-
openapi_spec_hash: 6b1344a59044318e824c8d1af96033c7
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a0aa54a302fbd7fff4ed7ad8a8547587d37b63324fc4af652bfa685ee9f8da44.yml
3+
openapi_spec_hash: e45c5af19307cfc8b9baa4b8f8e865a0
44
config_hash: 7f49c38fa3abe9b7038ffe62262c4912

src/openai/resources/responses/responses.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1660,7 +1660,6 @@ def compact(
16601660
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
16611661
instructions: Optional[str] | Omit = omit,
16621662
previous_response_id: Optional[str] | Omit = omit,
1663-
prompt_cache_key: Optional[str] | Omit = omit,
16641663
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
16651664
# The extra values given here take precedence over values defined on the client or passed to this method.
16661665
extra_headers: Headers | None = None,
@@ -1696,8 +1695,6 @@ def compact(
16961695
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
16971696
Cannot be used in conjunction with `conversation`.
16981697
1699-
prompt_cache_key: A key to use when reading from or writing to the prompt cache.
1700-
17011698
extra_headers: Send extra headers
17021699
17031700
extra_query: Add additional query parameters to the request
@@ -1714,7 +1711,6 @@ def compact(
17141711
"input": input,
17151712
"instructions": instructions,
17161713
"previous_response_id": previous_response_id,
1717-
"prompt_cache_key": prompt_cache_key,
17181714
},
17191715
response_compact_params.ResponseCompactParams,
17201716
),
@@ -3325,7 +3321,6 @@ async def compact(
33253321
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
33263322
instructions: Optional[str] | Omit = omit,
33273323
previous_response_id: Optional[str] | Omit = omit,
3328-
prompt_cache_key: Optional[str] | Omit = omit,
33293324
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
33303325
# The extra values given here take precedence over values defined on the client or passed to this method.
33313326
extra_headers: Headers | None = None,
@@ -3361,8 +3356,6 @@ async def compact(
33613356
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
33623357
Cannot be used in conjunction with `conversation`.
33633358
3364-
prompt_cache_key: A key to use when reading from or writing to the prompt cache.
3365-
33663359
extra_headers: Send extra headers
33673360
33683361
extra_query: Add additional query parameters to the request
@@ -3379,7 +3372,6 @@ async def compact(
33793372
"input": input,
33803373
"instructions": instructions,
33813374
"previous_response_id": previous_response_id,
3382-
"prompt_cache_key": prompt_cache_key,
33833375
},
33843376
response_compact_params.ResponseCompactParams,
33853377
),

src/openai/types/responses/easy_input_message.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -30,14 +30,5 @@ class EasyInputMessage(BaseModel):
3030
One of `user`, `assistant`, `system`, or `developer`.
3131
"""
3232

33-
phase: Optional[Literal["commentary", "final_answer"]] = None
34-
"""The phase of an assistant message.
35-
36-
Use `commentary` for an intermediate assistant message and `final_answer` for
37-
the final assistant message. For follow-up requests with models like
38-
`gpt-5.3-codex` and later, preserve and resend phase on all assistant messages.
39-
Omitting it can degrade performance. Not used for user messages.
40-
"""
41-
4233
type: Optional[Literal["message"]] = None
4334
"""The type of the message input. Always `message`."""

src/openai/types/responses/easy_input_message_param.py

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
from __future__ import annotations
44

5-
from typing import Union, Optional
5+
from typing import Union
66
from typing_extensions import Literal, Required, TypedDict
77

88
from .response_input_message_content_list_param import ResponseInputMessageContentListParam
@@ -31,14 +31,5 @@ class EasyInputMessageParam(TypedDict, total=False):
3131
One of `user`, `assistant`, `system`, or `developer`.
3232
"""
3333

34-
phase: Optional[Literal["commentary", "final_answer"]]
35-
"""The phase of an assistant message.
36-
37-
Use `commentary` for an intermediate assistant message and `final_answer` for
38-
the final assistant message. For follow-up requests with models like
39-
`gpt-5.3-codex` and later, preserve and resend phase on all assistant messages.
40-
Omitting it can degrade performance. Not used for user messages.
41-
"""
42-
4334
type: Literal["message"]
4435
"""The type of the message input. Always `message`."""

src/openai/types/responses/response_compact_params.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -131,6 +131,3 @@ class ResponseCompactParams(TypedDict, total=False):
131131
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
132132
Cannot be used in conjunction with `conversation`.
133133
"""
134-
135-
prompt_cache_key: Optional[str]
136-
"""A key to use when reading from or writing to the prompt cache."""

src/openai/types/responses/response_output_message.py

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

3-
from typing import List, Union, Optional
3+
from typing import List, Union
44
from typing_extensions import Literal, Annotated, TypeAlias
55

66
from ..._utils import PropertyInfo
@@ -34,12 +34,3 @@ class ResponseOutputMessage(BaseModel):
3434

3535
type: Literal["message"]
3636
"""The type of the output message. Always `message`."""
37-
38-
phase: Optional[Literal["commentary", "final_answer"]] = None
39-
"""The phase of an assistant message.
40-
41-
Use `commentary` for an intermediate assistant message and `final_answer` for
42-
the final assistant message. For follow-up requests with models like
43-
`gpt-5.3-codex` and later, preserve and resend phase on all assistant messages.
44-
Omitting it can degrade performance. Not used for user messages.
45-
"""

src/openai/types/responses/response_output_message_param.py

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
from __future__ import annotations
44

5-
from typing import Union, Iterable, Optional
5+
from typing import Union, Iterable
66
from typing_extensions import Literal, Required, TypeAlias, TypedDict
77

88
from .response_output_text_param import ResponseOutputTextParam
@@ -34,12 +34,3 @@ class ResponseOutputMessageParam(TypedDict, total=False):
3434

3535
type: Required[Literal["message"]]
3636
"""The type of the output message. Always `message`."""
37-
38-
phase: Optional[Literal["commentary", "final_answer"]]
39-
"""The phase of an assistant message.
40-
41-
Use `commentary` for an intermediate assistant message and `final_answer` for
42-
the final assistant message. For follow-up requests with models like
43-
`gpt-5.3-codex` and later, preserve and resend phase on all assistant messages.
44-
Omitting it can degrade performance. Not used for user messages.
45-
"""

tests/api_resources/conversations/test_items.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,6 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
4444
{
4545
"content": "string",
4646
"role": "user",
47-
"phase": "commentary",
4847
"type": "message",
4948
}
5049
],
@@ -286,7 +285,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
286285
{
287286
"content": "string",
288287
"role": "user",
289-
"phase": "commentary",
290288
"type": "message",
291289
}
292290
],

tests/api_resources/test_conversations.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
3232
{
3333
"content": "string",
3434
"role": "user",
35-
"phase": "commentary",
3635
"type": "message",
3736
}
3837
],
@@ -196,7 +195,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
196195
{
197196
"content": "string",
198197
"role": "user",
199-
"phase": "commentary",
200198
"type": "message",
201199
}
202200
],

tests/api_resources/test_responses.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -385,7 +385,6 @@ def test_method_compact_with_all_params(self, client: OpenAI) -> None:
385385
input="string",
386386
instructions="instructions",
387387
previous_response_id="resp_123",
388-
prompt_cache_key="prompt_cache_key",
389388
)
390389
assert_matches_type(CompactedResponse, response, path=["response"])
391390

@@ -794,7 +793,6 @@ async def test_method_compact_with_all_params(self, async_client: AsyncOpenAI) -
794793
input="string",
795794
instructions="instructions",
796795
previous_response_id="resp_123",
797-
prompt_cache_key="prompt_cache_key",
798796
)
799797
assert_matches_type(CompactedResponse, response, path=["response"])
800798

0 commit comments

Comments (0)