diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
deleted file mode 100644
index 503de9d99a..0000000000
--- a/.github/workflows/release-doctor.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-name: Release Doctor
-on:
-  push:
-    branches:
-      - main
-  workflow_dispatch:
-
-jobs:
-  release_doctor:
-    name: release doctor
-    runs-on: ubuntu-latest
-    environment: publish
-    if: github.repository == 'openai/openai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
-
-    steps:
-      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
-
-      - name: Check release environment
-        run: |
-          bash ./bin/check-release-environment
-        env:
-          STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }}
-          PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 527d2e30b0..7ef8288ed5 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "2.32.0"
+  ".": "2.33.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index f069d6c8b9..9a106e0f6d 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 152
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7c540cce6eb30401259f4831ea9803b6d88501605d13734f98212cbb3b199e10.yml
-openapi_spec_hash: 06e656be22bbb92689954253668b42fc
-config_hash: 1a88b104658b6c854117996c080ebe6b
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-64c6ba619ccbf87e56b4f464230d04401fd78ad924d2606176309d19ca281af5.yml
+openapi_spec_hash: 5e4f2073040a12c26ce58e86a72fe47e
+config_hash: 50c98d8869a8cfdee2ab7dc664c4b6fe
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2640bc3d63..effb1cc263 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,23 @@
 # Changelog
 
+## 2.33.0 (2026-04-28)
+
+Full Changelog: [v2.32.0...v2.33.0](https://github.com/openai/openai-python/compare/v2.32.0...v2.33.0)
+
+### Features
+
+* **api:** api update ([18f834a](https://github.com/openai/openai-python/commit/18f834a54f92ea827452471a46a4f442f251e2c8))
+
+
+### Bug Fixes
+
+* **api:** correct prompt_cache_retention enum value from in-memory to in_memory ([#1822](https://github.com/openai/openai-python/issues/1822)) ([f9d2d13](https://github.com/openai/openai-python/commit/f9d2d1359688a6247ecba858fc687173c480c9c8))
+
+
+### Chores
+
+* **ci:** remove release-doctor workflow ([00b2091](https://github.com/openai/openai-python/commit/00b20910e3539842f21d86ab5928fb5216d3a765))
+
 ## 2.32.0 (2026-04-15)
 
 Full Changelog: [v2.31.0...v2.32.0](https://github.com/openai/openai-python/compare/v2.31.0...v2.32.0)
diff --git a/bin/check-release-environment b/bin/check-release-environment
deleted file mode 100644
index 044ed525d1..0000000000
--- a/bin/check-release-environment
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-errors=()
-
-if [ -z "${STAINLESS_API_KEY}" ]; then
-  errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.")
-fi
-
-if [ -z "${PYPI_TOKEN}" ]; then
-  errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
-fi
-
-lenErrors=${#errors[@]}
-
-if [[ lenErrors -gt 0 ]]; then
-  echo -e "Found the following errors in the release environment:\n"
-
-  for error in "${errors[@]}"; do
-    echo -e "- $error\n"
-  done
-
-  exit 1
-fi
-
-echo "The environment is ready to push releases!"
diff --git a/pyproject.toml b/pyproject.toml
index d0d533e8a6..b2f4dd11cb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "2.32.0"
+version = "2.33.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index a98695ba0e..b73f7aa7bd 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "2.32.0"  # x-release-please-version
+__version__ = "2.33.0"  # x-release-please-version
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index 845bd1a1e1..8b4fc12ae9 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -109,7 +109,7 @@ def parse(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         safety_identifier: str | Omit = omit,
         seed: Optional[int] | Omit = omit,
@@ -264,7 +264,7 @@ def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -571,7 +571,7 @@ def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -877,7 +877,7 @@ def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -1182,7 +1182,7 @@ def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -1461,7 +1461,7 @@ def stream(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         safety_identifier: str | Omit = omit,
         seed: Optional[int] | Omit = omit,
@@ -1612,7 +1612,7 @@ async def parse(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         safety_identifier: str | Omit = omit,
         seed: Optional[int] | Omit = omit,
@@ -1767,7 +1767,7 @@ async def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -2074,7 +2074,7 @@ async def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -2380,7 +2380,7 @@ async def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -2685,7 +2685,7 @@ async def create(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         response_format: completion_create_params.ResponseFormat | Omit = omit,
         safety_identifier: str | Omit = omit,
@@ -2964,7 +2964,7 @@ def stream(
         prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
         presence_penalty: Optional[float] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
         safety_identifier: str | Omit = omit,
         seed: Optional[int] | Omit = omit,
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index ab5f7688a5..48705098cc 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -146,7 +146,7 @@ def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -396,7 +396,7 @@ def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -645,7 +645,7 @@ def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -892,7 +892,7 @@ def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -995,7 +995,7 @@ def stream(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -1036,7 +1036,7 @@ def stream(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -1187,7 +1187,7 @@ def parse(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -1686,6 +1686,7 @@ def compact(
         instructions: Optional[str] | Omit = omit,
         previous_response_id: Optional[str] | Omit = omit,
         prompt_cache_key: Optional[str] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1723,6 +1724,8 @@
 
           prompt_cache_key: A key to use when reading from or writing to the prompt cache.
 
+          prompt_cache_retention: How long to retain a prompt cache entry created by this request.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1740,6 +1743,7 @@
                     "instructions": instructions,
                     "previous_response_id": previous_response_id,
                     "prompt_cache_key": prompt_cache_key,
+                    "prompt_cache_retention": prompt_cache_retention,
                 },
                 response_compact_params.ResponseCompactParams,
             ),
@@ -1823,7 +1827,7 @@ async def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -2073,7 +2077,7 @@ async def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -2322,7 +2326,7 @@ async def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -2569,7 +2573,7 @@ async def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -2672,7 +2676,7 @@ def stream(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -2713,7 +2717,7 @@ def stream(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -2868,7 +2872,7 @@ async def parse(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -3367,6 +3371,7 @@ async def compact(
         instructions: Optional[str] | Omit = omit,
         previous_response_id: Optional[str] | Omit = omit,
         prompt_cache_key: Optional[str] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -3404,6 +3409,8 @@
 
           prompt_cache_key: A key to use when reading from or writing to the prompt cache.
 
+          prompt_cache_retention: How long to retain a prompt cache entry created by this request.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -3421,6 +3428,7 @@
                     "instructions": instructions,
                     "previous_response_id": previous_response_id,
                     "prompt_cache_key": prompt_cache_key,
+                    "prompt_cache_retention": prompt_cache_retention,
                 },
                 response_compact_params.ResponseCompactParams,
             ),
@@ -4543,7 +4551,7 @@ def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
@@ -4623,7 +4631,7 @@ async def create(
         previous_response_id: Optional[str] | Omit = omit,
         prompt: Optional[ResponsePromptParam] | Omit = omit,
         prompt_cache_key: str | Omit = omit,
-        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit,
         reasoning: Optional[Reasoning] | Omit = omit,
         safety_identifier: str | Omit = omit,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index 8e71ccbe41..0379ee0865 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -185,7 +185,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
     """
 
-    prompt_cache_retention: Optional[Literal["in-memory", "24h"]]
+    prompt_cache_retention: Optional[Literal["in_memory", "24h"]]
     """The retention policy for the prompt cache.
 
     Set to `24h` to enable extended prompt caching, which keeps cached prefixes
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index ada0783bce..0d2491ea7c 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -214,7 +214,7 @@ class Response(BaseModel):
     [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
     """
 
-    prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None
+    prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = None
     """The retention policy for the prompt cache.
 
     Set to `24h` to enable extended prompt caching, which keeps cached prefixes
diff --git a/src/openai/types/responses/response_compact_params.py b/src/openai/types/responses/response_compact_params.py
index 0b163a9e78..2575438b34 100644
--- a/src/openai/types/responses/response_compact_params.py
+++ b/src/openai/types/responses/response_compact_params.py
@@ -140,3 +140,6 @@ class ResponseCompactParams(TypedDict, total=False):
 
     prompt_cache_key: Optional[str]
     """A key to use when reading from or writing to the prompt cache."""
+
+    prompt_cache_retention: Optional[Literal["in_memory", "24h"]]
+    """How long to retain a prompt cache entry created by this request."""
diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py
index bf7170da1f..a04495f40a 100644
--- a/src/openai/types/responses/response_create_params.py
+++ b/src/openai/types/responses/response_create_params.py
@@ -152,7 +152,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
     """
 
-    prompt_cache_retention: Optional[Literal["in-memory", "24h"]]
+    prompt_cache_retention: Optional[Literal["in_memory", "24h"]]
     """The retention policy for the prompt cache.
 
     Set to `24h` to enable extended prompt caching, which keeps cached prefixes
diff --git a/src/openai/types/responses/responses_client_event.py b/src/openai/types/responses/responses_client_event.py
index 2bc6f899c5..5f9e73c61f 100644
--- a/src/openai/types/responses/responses_client_event.py
+++ b/src/openai/types/responses/responses_client_event.py
@@ -184,7 +184,7 @@ class ResponsesClientEvent(BaseModel):
     [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
     """
 
-    prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None
+    prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = None
     """The retention policy for the prompt cache.
 
     Set to `24h` to enable extended prompt caching, which keeps cached prefixes
diff --git a/src/openai/types/responses/responses_client_event_param.py b/src/openai/types/responses/responses_client_event_param.py
index 08596ef9ea..249c812116 100644
--- a/src/openai/types/responses/responses_client_event_param.py
+++ b/src/openai/types/responses/responses_client_event_param.py
@@ -185,7 +185,7 @@ class ResponsesClientEventParam(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
     """
 
-    prompt_cache_retention: Optional[Literal["in-memory", "24h"]]
+    prompt_cache_retention: Optional[Literal["in_memory", "24h"]]
     """The retention policy for the prompt cache.
 
     Set to `24h` to enable extended prompt caching, which keeps cached prefixes
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index a75764b5e9..ea3066a505 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -73,7 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning_effort="none",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
@@ -207,7 +207,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning_effort="none",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
@@ -516,7 +516,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning_effort="none",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
@@ -650,7 +650,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning_effort="none",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 0b871d525d..40405f61b2 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                 "version": "version",
             },
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning={
                 "effort": "none",
                 "generate_summary": "auto",
@@ -140,7 +140,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                 "version": "version",
             },
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning={
                 "effort": "none",
                 "generate_summary": "auto",
@@ -388,6 +388,7 @@ def test_method_compact_with_all_params(self, client: OpenAI) -> None:
             instructions="instructions",
             previous_response_id="resp_123",
             prompt_cache_key="prompt_cache_key",
+            prompt_cache_retention="in_memory",
         )
         assert_matches_type(CompactedResponse, response, path=["response"])
 
@@ -463,7 +464,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                 "version": "version",
             },
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning={
                 "effort": "none",
                 "generate_summary": "auto",
@@ -551,7 +552,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                 "version": "version",
             },
             prompt_cache_key="prompt-cache-key-1234",
-            prompt_cache_retention="in-memory",
+            prompt_cache_retention="in_memory",
             reasoning={
                 "effort": "none",
                 "generate_summary": "auto",
@@ -799,6 +800,7 @@ async def test_method_compact_with_all_params(self, async_client: AsyncOpenAI) -
             instructions="instructions",
             previous_response_id="resp_123",
             prompt_cache_key="prompt_cache_key",
+            prompt_cache_retention="in_memory",
         )
         assert_matches_type(CompactedResponse, response, path=["response"])
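A minimal usage sketch of the corrected value, assuming `OPENAI_API_KEY` is set in the environment and using `gpt-5.1` as a placeholder model name; per the change above, `prompt_cache_retention` accepts `"in_memory"` or `"24h"` on both the Chat Completions and Responses surfaces:

```python
# Sketch: passing the corrected prompt_cache_retention value.
# Assumes OPENAI_API_KEY is set; "gpt-5.1" is a placeholder model name.
from openai import OpenAI

client = OpenAI()

# Responses API: keep the cached prefix for this request's prompt_cache_key.
response = client.responses.create(
    model="gpt-5.1",
    input="Summarize the latest release notes.",
    prompt_cache_key="release-notes-prompt",
    prompt_cache_retention="in_memory",  # underscore spelling, not "in-memory"
)
print(response.output_text)

# Chat Completions accepts the same values; "24h" opts into extended retention.
completion = client.chat.completions.create(
    model="gpt-5.1",
    messages=[{"role": "user", "content": "Hello!"}],
    prompt_cache_key="release-notes-prompt",
    prompt_cache_retention="24h",
)
print(completion.choices[0].message.content)
```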