diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py
index 5676eb0b63..670b8c1656 100644
--- a/src/openai/lib/_parsing/_responses.py
+++ b/src/openai/lib/_parsing/_responses.py
@@ -131,8 +131,8 @@ def parse_response(
     )
 
 
-def parse_text(text: str, text_format: type[TextFormatT] | Omit) -> TextFormatT | None:
-    if not is_given(text_format):
+def parse_text(text: str | None, text_format: type[TextFormatT] | Omit) -> TextFormatT | None:
+    if text is None or not is_given(text_format):
         return None
 
     if is_basemodel_type(text_format):
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index 6bac7d65de..9373d6d0eb 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -315,6 +315,7 @@ def output_text(self) -> str:
             if output.type == "message":
                 for content in output.content:
                     if content.type == "output_text":
-                        texts.append(content.text)
+                        if content.text is not None:
+                            texts.append(content.text)
 
         return "".join(texts)
diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py
index 2386fcb3c0..c87b2c82dc 100644
--- a/src/openai/types/responses/response_output_text.py
+++ b/src/openai/types/responses/response_output_text.py
@@ -122,8 +122,8 @@ class ResponseOutputText(BaseModel):
     annotations: List[Annotation]
     """The annotations of the text output."""
 
-    text: str
-    """The text output from the model."""
+    text: Optional[str] = None
+    """The text output from the model, when present."""
 
     type: Literal["output_text"]
     """The type of the output text. Always `output_text`."""
diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py
index 8e5f16df95..94ecad3774 100644
--- a/tests/lib/responses/test_responses.py
+++ b/tests/lib/responses/test_responses.py
@@ -61,3 +61,21 @@ def test_parse_method_definition_in_sync(sync: bool, client: OpenAI, async_clien
         checking_client.responses.parse,
         exclude_params={"tools"},
     )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_output_text_ignores_null_text(client: OpenAI, respx_mock: MockRouter) -> None:
+    response = make_snapshot_request(
+        lambda c: c.responses.create(
+            model="gpt-4o-mini",
+            input="hi",
+        ),
+        content_snapshot=snapshot(
+            '{"id": "resp_null", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_null", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": null}, {"type": "output_text", "annotations": [], "logprobs": [], "text": "ok"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 1, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 1, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 2}, "user": null, "metadata": {}}'
+        ),
+        path="/responses",
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert response.output_text == "ok"