Skip to content

Commit a7a6016

Browse files
release: 2.18.0 (#2846)
* codegen metadata
* test note
* feat(api): add context_management to responses
* feat(api): responses context_management
* release: 2.18.0

Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Co-authored-by: Alex Chang <apcha@openai.com>
1 parent e888873 commit a7a6016

File tree

17 files changed

+178
-37
lines changed

17 files changed

+178
-37
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "2.17.0"
2+
".": "2.18.0"
33
}

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 137
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-64c3a646eb5dcad2b7ff7bd976c0e312b886676a542f6ffcd9a6c8503ae24c58.yml
3-
openapi_spec_hash: 91b1b7bf3c1a6b6c9c7507d4cac8fe2a
4-
config_hash: f8e6baff429cf000b8e4ba1da08dff47
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-bff810f46da56eff8d5e189b0d1f56ac07a8289723666138549d4239cad7c2ea.yml
3+
openapi_spec_hash: 7532ce5a6f490c8f5d1e079c76c70535
4+
config_hash: a1454ffd9612dee11f9d5a98e55eac9e

CHANGELOG.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,14 @@
11
# Changelog
22

3+
## 2.18.0 (2026-02-09)
4+
5+
Full Changelog: [v2.17.0...v2.18.0](https://github.com/openai/openai-python/compare/v2.17.0...v2.18.0)
6+
7+
### Features
8+
9+
* **api:** add context_management to responses ([137e992](https://github.com/openai/openai-python/commit/137e992b80956401d1867274fa7a0969edfdba54))
10+
* **api:** responses context_management ([c3bd017](https://github.com/openai/openai-python/commit/c3bd017318347af0a0105a7e975c8d91e22f7941))
11+
312
## 2.17.0 (2026-02-05)
413

514
Full Changelog: [v2.16.0...v2.17.0](https://github.com/openai/openai-python/compare/v2.16.0...v2.17.0)

examples/realtime/azure_realtime.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,7 @@ async def main() -> None:
4141
# The APIs are compatible with the OpenAI client library.
4242
# You can use the OpenAI client library to access the Azure OpenAI APIs.
4343
# Make sure to set the baseURL and apiKey to use the Azure OpenAI endpoint and token.
44-
client = AsyncOpenAI(
45-
websocket_base_url=base_url,
46-
api_key=token
47-
)
44+
client = AsyncOpenAI(websocket_base_url=base_url, api_key=token)
4845
async with client.realtime.connect(
4946
model=deployment_name,
5047
) as connection:

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "2.17.0"
3+
version = "2.18.0"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"

src/openai/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "2.17.0" # x-release-please-version
4+
__version__ = "2.18.0" # x-release-please-version

src/openai/resources/images.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -172,8 +172,8 @@ def edit(
172172
173173
input_fidelity: Control how much effort the model will exert to match the style and features,
174174
especially facial features, of input images. This parameter is only supported
175-
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
176-
`low`. Defaults to `low`.
175+
for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
176+
`gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
177177
178178
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
179179
indicate where `image` should be edited. If there are multiple images provided,
@@ -291,8 +291,8 @@ def edit(
291291
292292
input_fidelity: Control how much effort the model will exert to match the style and features,
293293
especially facial features, of input images. This parameter is only supported
294-
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
295-
`low`. Defaults to `low`.
294+
for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
295+
`gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
296296
297297
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
298298
indicate where `image` should be edited. If there are multiple images provided,
@@ -406,8 +406,8 @@ def edit(
406406
407407
input_fidelity: Control how much effort the model will exert to match the style and features,
408408
especially facial features, of input images. This parameter is only supported
409-
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
410-
`low`. Defaults to `low`.
409+
for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
410+
`gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
411411
412412
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
413413
indicate where `image` should be edited. If there are multiple images provided,
@@ -1068,8 +1068,8 @@ async def edit(
10681068
10691069
input_fidelity: Control how much effort the model will exert to match the style and features,
10701070
especially facial features, of input images. This parameter is only supported
1071-
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
1072-
`low`. Defaults to `low`.
1071+
for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
1072+
`gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
10731073
10741074
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
10751075
indicate where `image` should be edited. If there are multiple images provided,
@@ -1187,8 +1187,8 @@ async def edit(
11871187
11881188
input_fidelity: Control how much effort the model will exert to match the style and features,
11891189
especially facial features, of input images. This parameter is only supported
1190-
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
1191-
`low`. Defaults to `low`.
1190+
for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
1191+
`gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
11921192
11931193
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
11941194
indicate where `image` should be edited. If there are multiple images provided,
@@ -1302,8 +1302,8 @@ async def edit(
13021302
13031303
input_fidelity: Control how much effort the model will exert to match the style and features,
13041304
especially facial features, of input images. This parameter is only supported
1305-
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
1306-
`low`. Defaults to `low`.
1305+
for `gpt-image-1` and `gpt-image-1.5` and later models, unsupported for
1306+
`gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
13071307
13081308
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
13091309
indicate where `image` should be edited. If there are multiple images provided,

src/openai/resources/responses/responses.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,7 @@ def create(
9595
self,
9696
*,
9797
background: Optional[bool] | Omit = omit,
98+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
9899
conversation: Optional[response_create_params.Conversation] | Omit = omit,
99100
include: Optional[List[ResponseIncludable]] | Omit = omit,
100101
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -147,6 +148,8 @@ def create(
147148
background: Whether to run the model response in the background.
148149
[Learn more](https://platform.openai.com/docs/guides/background).
149150
151+
context_management: Context management configuration for this request.
152+
150153
conversation: The conversation that this response belongs to. Items from this conversation are
151154
prepended to `input_items` for this response request. Input items and output
152155
items from this response are automatically added to this conversation after this
@@ -341,6 +344,7 @@ def create(
341344
*,
342345
stream: Literal[True],
343346
background: Optional[bool] | Omit = omit,
347+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
344348
conversation: Optional[response_create_params.Conversation] | Omit = omit,
345349
include: Optional[List[ResponseIncludable]] | Omit = omit,
346350
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -399,6 +403,8 @@ def create(
399403
background: Whether to run the model response in the background.
400404
[Learn more](https://platform.openai.com/docs/guides/background).
401405
406+
context_management: Context management configuration for this request.
407+
402408
conversation: The conversation that this response belongs to. Items from this conversation are
403409
prepended to `input_items` for this response request. Input items and output
404410
items from this response are automatically added to this conversation after this
@@ -586,6 +592,7 @@ def create(
586592
*,
587593
stream: bool,
588594
background: Optional[bool] | Omit = omit,
595+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
589596
conversation: Optional[response_create_params.Conversation] | Omit = omit,
590597
include: Optional[List[ResponseIncludable]] | Omit = omit,
591598
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -644,6 +651,8 @@ def create(
644651
background: Whether to run the model response in the background.
645652
[Learn more](https://platform.openai.com/docs/guides/background).
646653
654+
context_management: Context management configuration for this request.
655+
647656
conversation: The conversation that this response belongs to. Items from this conversation are
648657
prepended to `input_items` for this response request. Input items and output
649658
items from this response are automatically added to this conversation after this
@@ -829,6 +838,7 @@ def create(
829838
self,
830839
*,
831840
background: Optional[bool] | Omit = omit,
841+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
832842
conversation: Optional[response_create_params.Conversation] | Omit = omit,
833843
include: Optional[List[ResponseIncludable]] | Omit = omit,
834844
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -868,6 +878,7 @@ def create(
868878
body=maybe_transform(
869879
{
870880
"background": background,
881+
"context_management": context_management,
871882
"conversation": conversation,
872883
"include": include,
873884
"input": input,
@@ -930,6 +941,7 @@ def stream(
930941
input: Union[str, ResponseInputParam],
931942
model: ResponsesModel,
932943
background: Optional[bool] | Omit = omit,
944+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
933945
text_format: type[TextFormatT] | Omit = omit,
934946
tools: Iterable[ParseableToolParam] | Omit = omit,
935947
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -970,6 +982,7 @@ def stream(
970982
input: Union[str, ResponseInputParam] | Omit = omit,
971983
model: ResponsesModel | Omit = omit,
972984
background: Optional[bool] | Omit = omit,
985+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
973986
text_format: type[TextFormatT] | Omit = omit,
974987
tools: Iterable[ParseableToolParam] | Omit = omit,
975988
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -1006,6 +1019,7 @@ def stream(
10061019
new_response_args = {
10071020
"input": input,
10081021
"model": model,
1022+
"context_management": context_management,
10091023
"conversation": conversation,
10101024
"include": include,
10111025
"instructions": instructions,
@@ -1061,6 +1075,7 @@ def stream(
10611075
input=input,
10621076
model=model,
10631077
tools=tools,
1078+
context_management=context_management,
10641079
conversation=conversation,
10651080
include=include,
10661081
instructions=instructions,
@@ -1118,6 +1133,7 @@ def parse(
11181133
*,
11191134
text_format: type[TextFormatT] | Omit = omit,
11201135
background: Optional[bool] | Omit = omit,
1136+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
11211137
conversation: Optional[response_create_params.Conversation] | Omit = omit,
11221138
include: Optional[List[ResponseIncludable]] | Omit = omit,
11231139
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -1176,6 +1192,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
11761192
body=maybe_transform(
11771193
{
11781194
"background": background,
1195+
"context_management": context_management,
11791196
"conversation": conversation,
11801197
"include": include,
11811198
"input": input,
@@ -1709,6 +1726,7 @@ async def create(
17091726
self,
17101727
*,
17111728
background: Optional[bool] | Omit = omit,
1729+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
17121730
conversation: Optional[response_create_params.Conversation] | Omit = omit,
17131731
include: Optional[List[ResponseIncludable]] | Omit = omit,
17141732
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -1761,6 +1779,8 @@ async def create(
17611779
background: Whether to run the model response in the background.
17621780
[Learn more](https://platform.openai.com/docs/guides/background).
17631781
1782+
context_management: Context management configuration for this request.
1783+
17641784
conversation: The conversation that this response belongs to. Items from this conversation are
17651785
prepended to `input_items` for this response request. Input items and output
17661786
items from this response are automatically added to this conversation after this
@@ -1955,6 +1975,7 @@ async def create(
19551975
*,
19561976
stream: Literal[True],
19571977
background: Optional[bool] | Omit = omit,
1978+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
19581979
conversation: Optional[response_create_params.Conversation] | Omit = omit,
19591980
include: Optional[List[ResponseIncludable]] | Omit = omit,
19601981
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2013,6 +2034,8 @@ async def create(
20132034
background: Whether to run the model response in the background.
20142035
[Learn more](https://platform.openai.com/docs/guides/background).
20152036
2037+
context_management: Context management configuration for this request.
2038+
20162039
conversation: The conversation that this response belongs to. Items from this conversation are
20172040
prepended to `input_items` for this response request. Input items and output
20182041
items from this response are automatically added to this conversation after this
@@ -2200,6 +2223,7 @@ async def create(
22002223
*,
22012224
stream: bool,
22022225
background: Optional[bool] | Omit = omit,
2226+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
22032227
conversation: Optional[response_create_params.Conversation] | Omit = omit,
22042228
include: Optional[List[ResponseIncludable]] | Omit = omit,
22052229
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2258,6 +2282,8 @@ async def create(
22582282
background: Whether to run the model response in the background.
22592283
[Learn more](https://platform.openai.com/docs/guides/background).
22602284
2285+
context_management: Context management configuration for this request.
2286+
22612287
conversation: The conversation that this response belongs to. Items from this conversation are
22622288
prepended to `input_items` for this response request. Input items and output
22632289
items from this response are automatically added to this conversation after this
@@ -2443,6 +2469,7 @@ async def create(
24432469
self,
24442470
*,
24452471
background: Optional[bool] | Omit = omit,
2472+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
24462473
conversation: Optional[response_create_params.Conversation] | Omit = omit,
24472474
include: Optional[List[ResponseIncludable]] | Omit = omit,
24482475
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2482,6 +2509,7 @@ async def create(
24822509
body=await async_maybe_transform(
24832510
{
24842511
"background": background,
2512+
"context_management": context_management,
24852513
"conversation": conversation,
24862514
"include": include,
24872515
"input": input,
@@ -2544,6 +2572,7 @@ def stream(
25442572
input: Union[str, ResponseInputParam],
25452573
model: ResponsesModel,
25462574
background: Optional[bool] | Omit = omit,
2575+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
25472576
text_format: type[TextFormatT] | Omit = omit,
25482577
tools: Iterable[ParseableToolParam] | Omit = omit,
25492578
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -2584,6 +2613,7 @@ def stream(
25842613
input: Union[str, ResponseInputParam] | Omit = omit,
25852614
model: ResponsesModel | Omit = omit,
25862615
background: Optional[bool] | Omit = omit,
2616+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
25872617
text_format: type[TextFormatT] | Omit = omit,
25882618
tools: Iterable[ParseableToolParam] | Omit = omit,
25892619
conversation: Optional[response_create_params.Conversation] | Omit = omit,
@@ -2620,6 +2650,7 @@ def stream(
26202650
new_response_args = {
26212651
"input": input,
26222652
"model": model,
2653+
"context_management": context_management,
26232654
"conversation": conversation,
26242655
"include": include,
26252656
"instructions": instructions,
@@ -2675,6 +2706,7 @@ def stream(
26752706
model=model,
26762707
stream=True,
26772708
tools=tools,
2709+
context_management=context_management,
26782710
conversation=conversation,
26792711
include=include,
26802712
instructions=instructions,
@@ -2736,6 +2768,7 @@ async def parse(
27362768
*,
27372769
text_format: type[TextFormatT] | Omit = omit,
27382770
background: Optional[bool] | Omit = omit,
2771+
context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
27392772
conversation: Optional[response_create_params.Conversation] | Omit = omit,
27402773
include: Optional[List[ResponseIncludable]] | Omit = omit,
27412774
input: Union[str, ResponseInputParam] | Omit = omit,
@@ -2794,6 +2827,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
27942827
body=maybe_transform(
27952828
{
27962829
"background": background,
2830+
"context_management": context_management,
27972831
"conversation": conversation,
27982832
"include": include,
27992833
"input": input,

0 commit comments

Comments (0)