diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index ce9effd75e..8da936538a 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
+from typing import List, Union, Optional, Dict, Any
 from typing_extensions import Literal, TypeAlias
 
 from .tool import Tool
@@ -30,6 +30,7 @@ class IncompleteDetails(BaseModel):
     """The reason why the response is incomplete."""
 
 
+# Dict[str, Any] is a forward-compatible escape hatch for tool-choice objects
 ToolChoice: TypeAlias = Union[
-    ToolChoiceOptions, ToolChoiceAllowed, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom
+    ToolChoiceOptions, ToolChoiceAllowed, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom, Dict[str, Any]
 ]
diff --git a/src/openai/types/responses/tool_choice_options.py b/src/openai/types/responses/tool_choice_options.py
index c200db54e1..1ee651fc87 100644
--- a/src/openai/types/responses/tool_choice_options.py
+++ b/src/openai/types/responses/tool_choice_options.py
@@ -4,4 +4,4 @@
 
 __all__ = ["ToolChoiceOptions"]
 
-ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
+ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required", "allowed_tools"]
diff --git a/tests/api_resources/responses/test_tool_choice_options.py b/tests/api_resources/responses/test_tool_choice_options.py
new file mode 100644
index 0000000000..4bfbb10a46
--- /dev/null
+++ b/tests/api_resources/responses/test_tool_choice_options.py
@@ -0,0 +1,14 @@
+from typing_extensions import get_args
+
+from openai.types.responses import ToolChoiceOptions
+
+
+def test_tool_choice_valid_values() -> None:
+    """ToolChoiceOptions accepts every documented mode, including allowed_tools."""
+    for value in ("none", "auto", "required", "allowed_tools"):
+        assert value in get_args(ToolChoiceOptions)
+
+
+def test_tool_choice_invalid_value() -> None:
+    """An unknown mode is not part of the Literal."""
+    assert "invalid_mode" not in get_args(ToolChoiceOptions)
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 0cc20e926b..3084bf3cb4 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -708,3 +708,24 @@ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
         await async_client.responses.with_raw_response.cancel(
             "",
         )
+
+
+def test_tool_choice_object(monkeypatch) -> None:
+    """A dict tool_choice must be forwarded verbatim in the request body."""
+    client = OpenAI(api_key="test-api-key")
+
+    # Stub the low-level request method so no network call is made.
+    def fake_post(path, *args, **kwargs):
+        body = kwargs.get("body") or {}
+        assert body["tool_choice"] == {"type": "code_interpreter"}
+        return {"id": "resp_test", "output": "ok"}
+
+    monkeypatch.setattr(client, "post", fake_post)
+
+    response = client.responses.create(
+        model="gpt-5",
+        input="test",
+        tools=[{"type": "code_interpreter"}],
+        tool_choice={"type": "code_interpreter"},
+    )
+
+    assert response["output"] == "ok"