Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 15 additions & 2 deletions src/openai/types/responses/response.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,15 +307,28 @@ class Response(BaseModel):

@property
def output_text(self) -> str:
    """Convenience property that aggregates all text content from the `output` list.

    Includes text from:
    - ``message`` output items (``output_text`` content blocks)
    - ``code_interpreter_call`` items (log output)
    - ``shell_call_output`` items (stdout)

    If no text content exists, then an empty string is returned.
    """
    texts: List[str] = []
    for output in self.output:
        if output.type == "message":
            # Only `output_text` content blocks carry plain text; other
            # content types (e.g. refusals) are intentionally skipped.
            for content in output.content:
                if content.type == "output_text":
                    texts.append(content.text)
        elif output.type == "code_interpreter_call":
            # `outputs` may be None when the call produced nothing; `or []`
            # keeps the loop safe in that case. Image outputs are skipped.
            for item in output.outputs or []:
                if item.type == "logs" and item.logs:
                    texts.append(item.logs)
        elif output.type == "shell_call_output":
            # NOTE(review): unlike `outputs` above, `output` is iterated
            # without a None-guard — presumably the model guarantees a list
            # here; confirm against the type definition.
            for item in output.output:
                if item.stdout:
                    texts.append(item.stdout)

    return "".join(texts)
100 changes: 100 additions & 0 deletions tests/lib/responses/test_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,106 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
)


@pytest.mark.respx(base_url=base_url)
def test_output_text_includes_code_interpreter_logs(client: OpenAI, respx_mock: MockRouter) -> None:
    """output_text should include both message text and code interpreter log output."""
    response = make_snapshot_request(
        lambda c: c.responses.create(
            model="gpt-4o-mini",
            input="Calculate 2+2 using code interpreter",
            tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        ),
        content_snapshot=snapshot(
            '{"id": "resp_test_ci_001", "object": "response", "created_at": 1754925900, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "ci_001", "type": "code_interpreter_call", "code": "result = 2 + 2\\nprint(result)", "container_id": "cntr_001", "outputs": [{"type": "logs", "logs": "4"}], "status": "completed"}, {"id": "msg_001", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": "The result is 4."}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 20, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 30, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 50}, "user": null, "metadata": {}}'
        ),
        path="/responses",
        mock_client=client,
        respx_mock=respx_mock,
    )

    # The code_interpreter_call item precedes the message item in `output`,
    # so its log text ("4") comes first in the aggregate.
    assert response.output_text == "4The result is 4."


@pytest.mark.respx(base_url=base_url)
def test_output_text_code_interpreter_only(client: OpenAI, respx_mock: MockRouter) -> None:
    """output_text should return code interpreter logs even without a message item."""
    response = make_snapshot_request(
        lambda c: c.responses.create(
            model="gpt-4o-mini",
            input="Run print('hello')",
            tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        ),
        content_snapshot=snapshot(
            '{"id": "resp_test_ci_002", "object": "response", "created_at": 1754925900, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "ci_002", "type": "code_interpreter_call", "code": "print(\'hello\')", "container_id": "cntr_002", "outputs": [{"type": "logs", "logs": "hello"}], "status": "completed"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 20, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 10, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 30}, "user": null, "metadata": {}}'
        ),
        path="/responses",
        mock_client=client,
        respx_mock=respx_mock,
    )

    # No `message` item in `output` — only the tool call's log text remains.
    assert response.output_text == "hello"


@pytest.mark.respx(base_url=base_url)
def test_output_text_code_interpreter_outputs_none(client: OpenAI, respx_mock: MockRouter) -> None:
    """output_text should handle code_interpreter_call with outputs=null gracefully."""
    response = make_snapshot_request(
        lambda c: c.responses.create(
            model="gpt-4o-mini",
            input="Run something",
            tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        ),
        content_snapshot=snapshot(
            '{"id": "resp_test_ci_003", "object": "response", "created_at": 1754925900, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "ci_003", "type": "code_interpreter_call", "code": "x = 1", "container_id": "cntr_003", "outputs": null, "status": "completed"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 20, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 5, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 25}, "user": null, "metadata": {}}'
        ),
        path="/responses",
        mock_client=client,
        respx_mock=respx_mock,
    )

    # `outputs: null` must not raise (the property iterates `outputs or []`).
    assert response.output_text == ""


@pytest.mark.respx(base_url=base_url)
def test_output_text_code_interpreter_image_only(client: OpenAI, respx_mock: MockRouter) -> None:
    """output_text should skip image outputs from code_interpreter_call."""
    response = make_snapshot_request(
        lambda c: c.responses.create(
            model="gpt-4o-mini",
            input="Generate a plot",
            tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
        ),
        content_snapshot=snapshot(
            '{"id": "resp_test_ci_004", "object": "response", "created_at": 1754925900, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "ci_004", "type": "code_interpreter_call", "code": "import matplotlib", "container_id": "cntr_004", "outputs": [{"type": "image", "url": "https://example.com/plot.png"}], "status": "completed"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 20, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 5, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 25}, "user": null, "metadata": {}}'
        ),
        path="/responses",
        mock_client=client,
        respx_mock=respx_mock,
    )

    # Image outputs carry no text, so the aggregate is empty.
    assert response.output_text == ""


@pytest.mark.respx(base_url=base_url)
def test_output_text_shell_call_output(client: OpenAI, respx_mock: MockRouter) -> None:
    """output_text should include stdout from shell_call_output items."""
    response = make_snapshot_request(
        lambda c: c.responses.create(
            model="gpt-4o-mini",
            input="List files",
            tools=[{"type": "shell", "shell": {"type": "bash"}}],
        ),
        content_snapshot=snapshot(
            '{"id": "resp_test_shell_001", "object": "response", "created_at": 1754925900, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "shell_001", "type": "shell_call_output", "call_id": "call_001", "output": [{"stdout": "file1.txt\\nfile2.txt", "stderr": "", "outcome": {"type": "exit", "exit_code": 0}}], "status": "completed"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 20, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 10, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 30}, "user": null, "metadata": {}}'
        ),
        path="/responses",
        mock_client=client,
        respx_mock=respx_mock,
    )

    # stdout is included; stderr is not part of the aggregate.
    assert response.output_text == "file1.txt\nfile2.txt"


@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
Expand Down