diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3b286e5ae..81f6dc20f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -12,7 +12,6 @@ jobs:
lint:
name: lint
runs-on: ubuntu-latest
-
steps:
- uses: actions/checkout@v4
@@ -33,7 +32,6 @@ jobs:
test:
name: test
runs-on: ubuntu-latest
-
steps:
- uses: actions/checkout@v4
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b8dda9bfc..554e34bbe 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.29.0"
+ ".": "0.30.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 8a5068455..d1b99e300 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,4 @@
-configured_endpoints: 78
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runloop-ai%2Frunloop-3542d28a321a2d5ae372d8e99dc615fbda814a223e1c15717c37727a1dd00ff1.yml
+configured_endpoints: 79
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runloop-ai%2Frunloop-59d51521fb27127ec00a91d37d9cbaa484577dbd200d290e61c9d69d33b3c760.yml
+openapi_spec_hash: 4ad68555fc0ec3e969817bebff620e88
+config_hash: 4cb90c87fb61338e46c50cea9c42abd7
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9cc1ced03..7750bbe2e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,38 @@
# Changelog
+## 0.30.0 (2025-04-16)
+
+Full Changelog: [v0.29.0...v0.30.0](https://github.com/runloopai/api-client-python/compare/v0.29.0...v0.30.0)
+
+### Features
+
+* **api:** api update ([380287d](https://github.com/runloopai/api-client-python/commit/380287d31238c9cc1cf99a3c394f03141be6174d))
+* **api:** api update ([e9857e8](https://github.com/runloopai/api-client-python/commit/e9857e86999295ef3a8507f69c61ea6ee2861908))
+* **api:** api update ([#586](https://github.com/runloopai/api-client-python/issues/586)) ([eb6d1ba](https://github.com/runloopai/api-client-python/commit/eb6d1ba0b6420f256c8b40dbae75f8a51854d32d))
+
+
+### Bug Fixes
+
+* **client:** correctly reuse idempotency key ([a6ba920](https://github.com/runloopai/api-client-python/commit/a6ba9201bf5012822ba97fdfdc48e96668a2d22e))
+* **perf:** optimize some hot paths ([edf120c](https://github.com/runloopai/api-client-python/commit/edf120c4cfc3d0104c3735c6882787a039b21bce))
+* **perf:** skip traversing types for NotGiven values ([bcb8823](https://github.com/runloopai/api-client-python/commit/bcb8823c114d7171745010f41442932657fc0b76))
+
+
+### Chores
+
+* fix typos ([#582](https://github.com/runloopai/api-client-python/issues/582)) ([66d248c](https://github.com/runloopai/api-client-python/commit/66d248cf691f776d04df6aeb3273734bbf914a3b))
+* **internal:** expand CI branch coverage ([0b68591](https://github.com/runloopai/api-client-python/commit/0b68591b8977c6863d75a464045a958809cca096))
+* **internal:** reduce CI branch coverage ([58821a3](https://github.com/runloopai/api-client-python/commit/58821a3fc73f47ed0601a8fdbeaad79778765719))
+* **internal:** remove trailing character ([#584](https://github.com/runloopai/api-client-python/issues/584)) ([65bacb7](https://github.com/runloopai/api-client-python/commit/65bacb71b584b8b1a3f998efb2d28102ffa98d74))
+* **internal:** slight transform perf improvement ([#587](https://github.com/runloopai/api-client-python/issues/587)) ([ec630c4](https://github.com/runloopai/api-client-python/commit/ec630c44eefc52b89ce69277c9040fc93dd9241f))
+* **internal:** update pyright settings ([487213d](https://github.com/runloopai/api-client-python/commit/487213dcc570b7d538fbff04b0a942cdbc6e97c6))
+* slight wording improvement in README ([#588](https://github.com/runloopai/api-client-python/issues/588)) ([2eb6437](https://github.com/runloopai/api-client-python/commit/2eb643793ea4ce03000b5b4eae6f39843a74543b))
+
+
+### Documentation
+
+* swap examples used in readme ([#585](https://github.com/runloopai/api-client-python/issues/585)) ([adf9a26](https://github.com/runloopai/api-client-python/commit/adf9a26a3890b7bd5899d191927ed97a9402c864))
+
## 0.29.0 (2025-03-25)
Full Changelog: [v0.28.0...v0.29.0](https://github.com/runloopai/api-client-python/compare/v0.28.0...v0.29.0)
diff --git a/README.md b/README.md
index ec6ee93f1..1e29f41fd 100644
--- a/README.md
+++ b/README.md
@@ -145,8 +145,7 @@ from runloop_api_client import Runloop
client = Runloop()
-blueprint_view = client.blueprints.create(
- name="name",
+devbox_view = client.devboxes.create(
launch_parameters={
"after_idle": {
"idle_time_seconds": 0,
@@ -157,15 +156,15 @@ blueprint_view = client.blueprints.create(
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
)
-print(blueprint_view.launch_parameters)
+print(devbox_view.launch_parameters)
```
## File uploads
-Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
+Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, or a tuple of `(filename, contents, media type)`.
```python
from pathlib import Path
diff --git a/api.md b/api.md
index aaecbbfdd..4ac1bcb8d 100644
--- a/api.md
+++ b/api.md
@@ -11,20 +11,22 @@ Types:
```python
from runloop_api_client.types import (
BenchmarkCreateParameters,
- BenchmarkListView,
BenchmarkRunListView,
BenchmarkRunView,
- BenchmarkView,
StartBenchmarkRunParameters,
+ BenchmarkCreateResponse,
+ BenchmarkRetrieveResponse,
+ BenchmarkListResponse,
+ BenchmarkListPublicResponse,
)
```
Methods:
-- client.benchmarks.create(\*\*params) -> BenchmarkView
-- client.benchmarks.retrieve(id) -> BenchmarkView
-- client.benchmarks.list(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkView]
-- client.benchmarks.list_public(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkView]
+- client.benchmarks.create(\*\*params) -> BenchmarkCreateResponse
+- client.benchmarks.retrieve(id) -> BenchmarkRetrieveResponse
+- client.benchmarks.list(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkListResponse]
+- client.benchmarks.list_public(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse]
- client.benchmarks.start_run(\*\*params) -> BenchmarkRunView
## Runs
@@ -47,6 +49,7 @@ from runloop_api_client.types import (
BlueprintListView,
BlueprintPreviewView,
BlueprintView,
+ BlueprintDeleteResponse,
)
```
@@ -55,6 +58,7 @@ Methods:
- client.blueprints.create(\*\*params) -> BlueprintView
- client.blueprints.retrieve(id) -> BlueprintView
- client.blueprints.list(\*\*params) -> SyncBlueprintsCursorIDPage[BlueprintView]
+- client.blueprints.delete(id) -> object
- client.blueprints.logs(id) -> BlueprintBuildLogsListView
- client.blueprints.preview(\*\*params) -> BlueprintPreviewView
@@ -75,6 +79,7 @@ from runloop_api_client.types import (
DevboxDeleteDiskSnapshotResponse,
DevboxKeepAliveResponse,
DevboxReadFileContentsResponse,
+ DevboxRemoveTunnelResponse,
DevboxUploadFileResponse,
)
```
@@ -94,7 +99,7 @@ Methods:
- client.devboxes.keep_alive(id) -> object
- client.devboxes.list_disk_snapshots(\*\*params) -> SyncDiskSnapshotsCursorIDPage[DevboxSnapshotView]
- client.devboxes.read_file_contents(id, \*\*params) -> str
-- client.devboxes.remove_tunnel(id, \*\*params) -> DevboxTunnelView
+- client.devboxes.remove_tunnel(id, \*\*params) -> object
- client.devboxes.resume(id) -> DevboxView
- client.devboxes.shutdown(id) -> DevboxView
- client.devboxes.snapshot_disk(id, \*\*params) -> DevboxSnapshotView
@@ -262,25 +267,28 @@ from runloop_api_client.types import (
InputContext,
ScenarioCreateParameters,
ScenarioEnvironment,
- ScenarioListView,
ScenarioRunListView,
ScenarioRunView,
- ScenarioView,
ScoringContract,
ScoringContractResultView,
ScoringFunction,
ScoringFunctionResultView,
StartScenarioRunParameters,
+ ScenarioCreateResponse,
+ ScenarioRetrieveResponse,
+ ScenarioUpdateResponse,
+ ScenarioListResponse,
+ ScenarioListPublicResponse,
)
```
Methods:
-- client.scenarios.create(\*\*params) -> ScenarioView
-- client.scenarios.retrieve(id) -> ScenarioView
-- client.scenarios.update(id, \*\*params) -> ScenarioView
-- client.scenarios.list(\*\*params) -> SyncScenariosCursorIDPage[ScenarioView]
-- client.scenarios.list_public(\*\*params) -> SyncScenariosCursorIDPage[ScenarioView]
+- client.scenarios.create(\*\*params) -> ScenarioCreateResponse
+- client.scenarios.retrieve(id) -> ScenarioRetrieveResponse
+- client.scenarios.update(id, \*\*params) -> ScenarioUpdateResponse
+- client.scenarios.list(\*\*params) -> SyncScenariosCursorIDPage[ScenarioListResponse]
+- client.scenarios.list_public(\*\*params) -> SyncScenariosCursorIDPage[ScenarioListPublicResponse]
- client.scenarios.start_run(\*\*params) -> ScenarioRunView
## Runs
diff --git a/pyproject.toml b/pyproject.toml
index f77538f01..d328c6824 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "runloop_api_client"
-version = "0.29.0"
+version = "0.30.0"
description = "The official Python library for the runloop API"
dynamic = ["readme"]
license = "MIT"
@@ -147,6 +147,7 @@ exclude = [
]
reportImplicitOverride = true
+reportOverlappingOverload = false
reportImportCycles = false
reportPrivateUsage = false
diff --git a/src/runloop_api_client/_base_client.py b/src/runloop_api_client/_base_client.py
index 62879ec12..7d2d646f9 100644
--- a/src/runloop_api_client/_base_client.py
+++ b/src/runloop_api_client/_base_client.py
@@ -409,7 +409,8 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0
idempotency_header = self._idempotency_header
if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
- headers[idempotency_header] = options.idempotency_key or self._idempotency_key()
+ options.idempotency_key = options.idempotency_key or self._idempotency_key()
+ headers[idempotency_header] = options.idempotency_key
# Don't set these headers if they were already set or removed by the caller. We check
# `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case.
@@ -943,6 +944,10 @@ def _request(
request = self._build_request(options, retries_taken=retries_taken)
self._prepare_request(request)
+ if options.idempotency_key:
+ # ensure the idempotency key is reused between requests
+ input_options.idempotency_key = options.idempotency_key
+
kwargs: HttpxSendArgs = {}
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
@@ -1475,6 +1480,10 @@ async def _request(
request = self._build_request(options, retries_taken=retries_taken)
await self._prepare_request(request)
+ if options.idempotency_key:
+ # ensure the idempotency key is reused between requests
+ input_options.idempotency_key = options.idempotency_key
+
kwargs: HttpxSendArgs = {}
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
diff --git a/src/runloop_api_client/_models.py b/src/runloop_api_client/_models.py
index b51a1bf5f..349357169 100644
--- a/src/runloop_api_client/_models.py
+++ b/src/runloop_api_client/_models.py
@@ -681,7 +681,7 @@ def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
setattr(typ, "__pydantic_config__", config) # noqa: B010
-# our use of subclasssing here causes weirdness for type checkers,
+# our use of subclassing here causes weirdness for type checkers,
# so we just pretend that we don't subclass
if TYPE_CHECKING:
GenericModel = BaseModel
diff --git a/src/runloop_api_client/_utils/_transform.py b/src/runloop_api_client/_utils/_transform.py
index 18afd9d8b..b0cc20a73 100644
--- a/src/runloop_api_client/_utils/_transform.py
+++ b/src/runloop_api_client/_utils/_transform.py
@@ -5,13 +5,15 @@
import pathlib
from typing import Any, Mapping, TypeVar, cast
from datetime import date, datetime
-from typing_extensions import Literal, get_args, override, get_type_hints
+from typing_extensions import Literal, get_args, override, get_type_hints as _get_type_hints
import anyio
import pydantic
from ._utils import (
is_list,
+ is_given,
+ lru_cache,
is_mapping,
is_iterable,
)
@@ -108,6 +110,7 @@ class Params(TypedDict, total=False):
return cast(_T, transformed)
+@lru_cache(maxsize=8096)
def _get_annotated_type(type_: type) -> type | None:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
@@ -126,7 +129,7 @@ def _get_annotated_type(type_: type) -> type | None:
def _maybe_transform_key(key: str, type_: type) -> str:
"""Transform the given `data` based on the annotations provided in `type_`.
- Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata.
+ Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata.
"""
annotated_type = _get_annotated_type(type_)
if annotated_type is None:
@@ -142,6 +145,10 @@ def _maybe_transform_key(key: str, type_: type) -> str:
return key
+def _no_transform_needed(annotation: type) -> bool:
+ return annotation == float or annotation == int
+
+
def _transform_recursive(
data: object,
*,
@@ -184,6 +191,15 @@ def _transform_recursive(
return cast(object, data)
inner_type = extract_type_arg(stripped_type, 0)
+ if _no_transform_needed(inner_type):
+ # for some types there is no need to transform anything, so we can get a small
+ # perf boost from skipping that work.
+ #
+ # but we still need to convert to a list to ensure the data is json-serializable
+ if is_list(data):
+ return data
+ return list(data)
+
return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
if is_union_type(stripped_type):
@@ -245,6 +261,11 @@ def _transform_typeddict(
result: dict[str, object] = {}
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
+ if not is_given(value):
+ # we don't need to include `NotGiven` values here as they'll
+ # be stripped out before the request is sent anyway
+ continue
+
type_ = annotations.get(key)
if type_ is None:
# we do not have a type annotation for this field, leave it as is
@@ -332,6 +353,15 @@ async def _async_transform_recursive(
return cast(object, data)
inner_type = extract_type_arg(stripped_type, 0)
+ if _no_transform_needed(inner_type):
+ # for some types there is no need to transform anything, so we can get a small
+ # perf boost from skipping that work.
+ #
+ # but we still need to convert to a list to ensure the data is json-serializable
+ if is_list(data):
+ return data
+ return list(data)
+
return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
if is_union_type(stripped_type):
@@ -393,6 +423,11 @@ async def _async_transform_typeddict(
result: dict[str, object] = {}
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
+ if not is_given(value):
+ # we don't need to include `NotGiven` values here as they'll
+ # be stripped out before the request is sent anyway
+ continue
+
type_ = annotations.get(key)
if type_ is None:
# we do not have a type annotation for this field, leave it as is
@@ -400,3 +435,13 @@ async def _async_transform_typeddict(
else:
result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_)
return result
+
+
+@lru_cache(maxsize=8096)
+def get_type_hints(
+ obj: Any,
+ globalns: dict[str, Any] | None = None,
+ localns: Mapping[str, Any] | None = None,
+ include_extras: bool = False,
+) -> dict[str, Any]:
+ return _get_type_hints(obj, globalns=globalns, localns=localns, include_extras=include_extras)
diff --git a/src/runloop_api_client/_utils/_typing.py b/src/runloop_api_client/_utils/_typing.py
index 278749b14..1958820f8 100644
--- a/src/runloop_api_client/_utils/_typing.py
+++ b/src/runloop_api_client/_utils/_typing.py
@@ -13,6 +13,7 @@
get_origin,
)
+from ._utils import lru_cache
from .._types import InheritsGeneric
from .._compat import is_union as _is_union
@@ -66,6 +67,7 @@ def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]:
# Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]]
+@lru_cache(maxsize=8096)
def strip_annotated_type(typ: type) -> type:
if is_required_type(typ) or is_annotated_type(typ):
return strip_annotated_type(cast(type, get_args(typ)[0]))
diff --git a/src/runloop_api_client/_version.py b/src/runloop_api_client/_version.py
index c454e8b46..524beb9ff 100644
--- a/src/runloop_api_client/_version.py
+++ b/src/runloop_api_client/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "runloop_api_client"
-__version__ = "0.29.0" # x-release-please-version
+__version__ = "0.30.0" # x-release-please-version
diff --git a/src/runloop_api_client/resources/benchmarks/benchmarks.py b/src/runloop_api_client/resources/benchmarks/benchmarks.py
index 54501bfa3..550ef0867 100644
--- a/src/runloop_api_client/resources/benchmarks/benchmarks.py
+++ b/src/runloop_api_client/resources/benchmarks/benchmarks.py
@@ -35,8 +35,11 @@
)
from ...pagination import SyncBenchmarksCursorIDPage, AsyncBenchmarksCursorIDPage
from ..._base_client import AsyncPaginator, make_request_options
-from ...types.benchmark_view import BenchmarkView
from ...types.benchmark_run_view import BenchmarkRunView
+from ...types.benchmark_list_response import BenchmarkListResponse
+from ...types.benchmark_create_response import BenchmarkCreateResponse
+from ...types.benchmark_retrieve_response import BenchmarkRetrieveResponse
+from ...types.benchmark_list_public_response import BenchmarkListPublicResponse
__all__ = ["BenchmarksResource", "AsyncBenchmarksResource"]
@@ -68,6 +71,7 @@ def with_streaming_response(self) -> BenchmarksResourceWithStreamingResponse:
def create(
self,
*,
+ is_public: bool,
name: str,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
scenario_ids: Optional[List[str]] | NotGiven = NOT_GIVEN,
@@ -78,11 +82,13 @@ def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> BenchmarkView:
+ ) -> BenchmarkCreateResponse:
"""
Create a Benchmark with a set of Scenarios.
Args:
+ is_public: Whether this benchmark is public.
+
name: The name of the Benchmark.
metadata: User defined metadata to attach to the benchmark for organization.
@@ -103,6 +109,7 @@ def create(
"/v1/benchmarks",
body=maybe_transform(
{
+ "is_public": is_public,
"name": name,
"metadata": metadata,
"scenario_ids": scenario_ids,
@@ -116,7 +123,7 @@ def create(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=BenchmarkView,
+ cast_to=BenchmarkCreateResponse,
)
def retrieve(
@@ -129,7 +136,7 @@ def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BenchmarkView:
+ ) -> BenchmarkRetrieveResponse:
"""
Get a previously created Benchmark.
@@ -149,7 +156,7 @@ def retrieve(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=BenchmarkView,
+ cast_to=BenchmarkRetrieveResponse,
)
def list(
@@ -163,7 +170,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncBenchmarksCursorIDPage[BenchmarkView]:
+ ) -> SyncBenchmarksCursorIDPage[BenchmarkListResponse]:
"""
List all Benchmarks matching filter.
@@ -182,7 +189,7 @@ def list(
"""
return self._get_api_list(
"/v1/benchmarks",
- page=SyncBenchmarksCursorIDPage[BenchmarkView],
+ page=SyncBenchmarksCursorIDPage[BenchmarkListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -196,7 +203,7 @@ def list(
benchmark_list_params.BenchmarkListParams,
),
),
- model=BenchmarkView,
+ model=BenchmarkListResponse,
)
def list_public(
@@ -210,7 +217,7 @@ def list_public(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncBenchmarksCursorIDPage[BenchmarkView]:
+ ) -> SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse]:
"""
List all public benchmarks matching filter.
@@ -229,7 +236,7 @@ def list_public(
"""
return self._get_api_list(
"/v1/benchmarks/list_public",
- page=SyncBenchmarksCursorIDPage[BenchmarkView],
+ page=SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -243,7 +250,7 @@ def list_public(
benchmark_list_public_params.BenchmarkListPublicParams,
),
),
- model=BenchmarkView,
+ model=BenchmarkListPublicResponse,
)
def start_run(
@@ -328,6 +335,7 @@ def with_streaming_response(self) -> AsyncBenchmarksResourceWithStreamingRespons
async def create(
self,
*,
+ is_public: bool,
name: str,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
scenario_ids: Optional[List[str]] | NotGiven = NOT_GIVEN,
@@ -338,11 +346,13 @@ async def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> BenchmarkView:
+ ) -> BenchmarkCreateResponse:
"""
Create a Benchmark with a set of Scenarios.
Args:
+ is_public: Whether this benchmark is public.
+
name: The name of the Benchmark.
metadata: User defined metadata to attach to the benchmark for organization.
@@ -363,6 +373,7 @@ async def create(
"/v1/benchmarks",
body=await async_maybe_transform(
{
+ "is_public": is_public,
"name": name,
"metadata": metadata,
"scenario_ids": scenario_ids,
@@ -376,7 +387,7 @@ async def create(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=BenchmarkView,
+ cast_to=BenchmarkCreateResponse,
)
async def retrieve(
@@ -389,7 +400,7 @@ async def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> BenchmarkView:
+ ) -> BenchmarkRetrieveResponse:
"""
Get a previously created Benchmark.
@@ -409,7 +420,7 @@ async def retrieve(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=BenchmarkView,
+ cast_to=BenchmarkRetrieveResponse,
)
def list(
@@ -423,7 +434,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[BenchmarkView, AsyncBenchmarksCursorIDPage[BenchmarkView]]:
+ ) -> AsyncPaginator[BenchmarkListResponse, AsyncBenchmarksCursorIDPage[BenchmarkListResponse]]:
"""
List all Benchmarks matching filter.
@@ -442,7 +453,7 @@ def list(
"""
return self._get_api_list(
"/v1/benchmarks",
- page=AsyncBenchmarksCursorIDPage[BenchmarkView],
+ page=AsyncBenchmarksCursorIDPage[BenchmarkListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -456,7 +467,7 @@ def list(
benchmark_list_params.BenchmarkListParams,
),
),
- model=BenchmarkView,
+ model=BenchmarkListResponse,
)
def list_public(
@@ -470,7 +481,7 @@ def list_public(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[BenchmarkView, AsyncBenchmarksCursorIDPage[BenchmarkView]]:
+ ) -> AsyncPaginator[BenchmarkListPublicResponse, AsyncBenchmarksCursorIDPage[BenchmarkListPublicResponse]]:
"""
List all public benchmarks matching filter.
@@ -489,7 +500,7 @@ def list_public(
"""
return self._get_api_list(
"/v1/benchmarks/list_public",
- page=AsyncBenchmarksCursorIDPage[BenchmarkView],
+ page=AsyncBenchmarksCursorIDPage[BenchmarkListPublicResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -503,7 +514,7 @@ def list_public(
benchmark_list_public_params.BenchmarkListPublicParams,
),
),
- model=BenchmarkView,
+ model=BenchmarkListPublicResponse,
)
async def start_run(
diff --git a/src/runloop_api_client/resources/blueprints.py b/src/runloop_api_client/resources/blueprints.py
index fc0f54d35..67c540dc0 100644
--- a/src/runloop_api_client/resources/blueprints.py
+++ b/src/runloop_api_client/resources/blueprints.py
@@ -321,6 +321,46 @@ def list(
model=BlueprintView,
)
+ def delete(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> object:
+ """
+ Delete a previously created Blueprint.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._post(
+ f"/v1/blueprints/{id}/delete",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=object,
+ )
+
def logs(
self,
id: str,
@@ -710,6 +750,46 @@ def list(
model=BlueprintView,
)
+ async def delete(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> object:
+ """
+ Delete a previously created Blueprint.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._post(
+ f"/v1/blueprints/{id}/delete",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=object,
+ )
+
async def logs(
self,
id: str,
@@ -825,6 +905,9 @@ def __init__(self, blueprints: BlueprintsResource) -> None:
self.list = to_raw_response_wrapper(
blueprints.list,
)
+ self.delete = to_raw_response_wrapper(
+ blueprints.delete,
+ )
self.logs = to_raw_response_wrapper(
blueprints.logs,
)
@@ -846,6 +929,9 @@ def __init__(self, blueprints: AsyncBlueprintsResource) -> None:
self.list = async_to_raw_response_wrapper(
blueprints.list,
)
+ self.delete = async_to_raw_response_wrapper(
+ blueprints.delete,
+ )
self.logs = async_to_raw_response_wrapper(
blueprints.logs,
)
@@ -867,6 +953,9 @@ def __init__(self, blueprints: BlueprintsResource) -> None:
self.list = to_streamed_response_wrapper(
blueprints.list,
)
+ self.delete = to_streamed_response_wrapper(
+ blueprints.delete,
+ )
self.logs = to_streamed_response_wrapper(
blueprints.logs,
)
@@ -888,6 +977,9 @@ def __init__(self, blueprints: AsyncBlueprintsResource) -> None:
self.list = async_to_streamed_response_wrapper(
blueprints.list,
)
+ self.delete = async_to_streamed_response_wrapper(
+ blueprints.delete,
+ )
self.logs = async_to_streamed_response_wrapper(
blueprints.logs,
)
diff --git a/src/runloop_api_client/resources/devboxes/devboxes.py b/src/runloop_api_client/resources/devboxes/devboxes.py
index 21a052b1d..7f20d6005 100644
--- a/src/runloop_api_client/resources/devboxes/devboxes.py
+++ b/src/runloop_api_client/resources/devboxes/devboxes.py
@@ -973,7 +973,7 @@ def remove_tunnel(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> DevboxTunnelView:
+ ) -> object:
"""
Remove a previously opened tunnel on the Devbox.
@@ -1002,7 +1002,7 @@ def remove_tunnel(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=DevboxTunnelView,
+ cast_to=object,
)
def resume(
@@ -2159,7 +2159,7 @@ async def remove_tunnel(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> DevboxTunnelView:
+ ) -> object:
"""
Remove a previously opened tunnel on the Devbox.
@@ -2188,7 +2188,7 @@ async def remove_tunnel(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=DevboxTunnelView,
+ cast_to=object,
)
async def resume(
diff --git a/src/runloop_api_client/resources/scenarios/scenarios.py b/src/runloop_api_client/resources/scenarios/scenarios.py
index 814c424b4..a13b714cb 100644
--- a/src/runloop_api_client/resources/scenarios/scenarios.py
+++ b/src/runloop_api_client/resources/scenarios/scenarios.py
@@ -45,11 +45,15 @@
from ...pagination import SyncScenariosCursorIDPage, AsyncScenariosCursorIDPage
from ...lib.polling import PollingConfig
from ..._base_client import AsyncPaginator, make_request_options
-from ...types.scenario_view import ScenarioView
from ...types.scenario_run_view import ScenarioRunView
from ...types.input_context_param import InputContextParam
+from ...types.scenario_list_response import ScenarioListResponse
from ...types.scoring_contract_param import ScoringContractParam
+from ...types.scenario_create_response import ScenarioCreateResponse
+from ...types.scenario_update_response import ScenarioUpdateResponse
from ...types.scenario_environment_param import ScenarioEnvironmentParam
+from ...types.scenario_retrieve_response import ScenarioRetrieveResponse
+from ...types.scenario_list_public_response import ScenarioListPublicResponse
__all__ = ["ScenariosResource", "AsyncScenariosResource"]
@@ -86,6 +90,7 @@ def create(
self,
*,
input_context: InputContextParam,
+ is_public: bool,
name: str,
scoring_contract: ScoringContractParam,
environment_parameters: Optional[ScenarioEnvironmentParam] | NotGiven = NOT_GIVEN,
@@ -98,7 +103,7 @@ def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> ScenarioView:
+ ) -> ScenarioCreateResponse:
"""
Create a Scenario, a repeatable AI coding evaluation test that defines the
starting environment as well as evaluation success criteria.
@@ -106,6 +111,8 @@ def create(
Args:
input_context: The input context for the Scenario.
+ is_public: Whether this scenario is public.
+
name: Name of the scenario.
scoring_contract: The scoring contract for the Scenario.
@@ -133,6 +140,7 @@ def create(
body=maybe_transform(
{
"input_context": input_context,
+ "is_public": is_public,
"name": name,
"scoring_contract": scoring_contract,
"environment_parameters": environment_parameters,
@@ -148,7 +156,7 @@ def create(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=ScenarioView,
+ cast_to=ScenarioCreateResponse,
)
def retrieve(
@@ -161,7 +169,7 @@ def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ScenarioView:
+ ) -> ScenarioRetrieveResponse:
"""
Get a previously created scenario.
@@ -181,7 +189,7 @@ def retrieve(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ScenarioView,
+ cast_to=ScenarioRetrieveResponse,
)
def update(
@@ -189,6 +197,7 @@ def update(
id: str,
*,
input_context: InputContextParam,
+ is_public: bool,
name: str,
scoring_contract: ScoringContractParam,
environment_parameters: Optional[ScenarioEnvironmentParam] | NotGiven = NOT_GIVEN,
@@ -201,7 +210,7 @@ def update(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> ScenarioView:
+ ) -> ScenarioUpdateResponse:
"""
Update a Scenario, a repeatable AI coding evaluation test that defines the
starting environment as well as evaluation success criteria.
@@ -209,6 +218,8 @@ def update(
Args:
input_context: The input context for the Scenario.
+ is_public: Whether this scenario is public.
+
name: Name of the scenario.
scoring_contract: The scoring contract for the Scenario.
@@ -238,6 +249,7 @@ def update(
body=maybe_transform(
{
"input_context": input_context,
+ "is_public": is_public,
"name": name,
"scoring_contract": scoring_contract,
"environment_parameters": environment_parameters,
@@ -253,7 +265,7 @@ def update(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=ScenarioView,
+ cast_to=ScenarioUpdateResponse,
)
def list(
@@ -268,7 +280,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncScenariosCursorIDPage[ScenarioView]:
+ ) -> SyncScenariosCursorIDPage[ScenarioListResponse]:
"""List all Scenarios matching filter.
Args:
@@ -290,7 +302,7 @@ def list(
"""
return self._get_api_list(
"/v1/scenarios",
- page=SyncScenariosCursorIDPage[ScenarioView],
+ page=SyncScenariosCursorIDPage[ScenarioListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -305,7 +317,7 @@ def list(
scenario_list_params.ScenarioListParams,
),
),
- model=ScenarioView,
+ model=ScenarioListResponse,
)
def list_public(
@@ -320,7 +332,7 @@ def list_public(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncScenariosCursorIDPage[ScenarioView]:
+ ) -> SyncScenariosCursorIDPage[ScenarioListPublicResponse]:
"""
List all public scenarios matching filter.
@@ -341,7 +353,7 @@ def list_public(
"""
return self._get_api_list(
"/v1/scenarios/list_public",
- page=SyncScenariosCursorIDPage[ScenarioView],
+ page=SyncScenariosCursorIDPage[ScenarioListPublicResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -356,7 +368,7 @@ def list_public(
scenario_list_public_params.ScenarioListPublicParams,
),
),
- model=ScenarioView,
+ model=ScenarioListPublicResponse,
)
def start_run(
@@ -509,6 +521,7 @@ async def create(
self,
*,
input_context: InputContextParam,
+ is_public: bool,
name: str,
scoring_contract: ScoringContractParam,
environment_parameters: Optional[ScenarioEnvironmentParam] | NotGiven = NOT_GIVEN,
@@ -521,7 +534,7 @@ async def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> ScenarioView:
+ ) -> ScenarioCreateResponse:
"""
Create a Scenario, a repeatable AI coding evaluation test that defines the
starting environment as well as evaluation success criteria.
@@ -529,6 +542,8 @@ async def create(
Args:
input_context: The input context for the Scenario.
+ is_public: Whether this scenario is public.
+
name: Name of the scenario.
scoring_contract: The scoring contract for the Scenario.
@@ -556,6 +571,7 @@ async def create(
body=await async_maybe_transform(
{
"input_context": input_context,
+ "is_public": is_public,
"name": name,
"scoring_contract": scoring_contract,
"environment_parameters": environment_parameters,
@@ -571,7 +587,7 @@ async def create(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=ScenarioView,
+ cast_to=ScenarioCreateResponse,
)
async def retrieve(
@@ -584,7 +600,7 @@ async def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ScenarioView:
+ ) -> ScenarioRetrieveResponse:
"""
Get a previously created scenario.
@@ -604,7 +620,7 @@ async def retrieve(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ScenarioView,
+ cast_to=ScenarioRetrieveResponse,
)
async def update(
@@ -612,6 +628,7 @@ async def update(
id: str,
*,
input_context: InputContextParam,
+ is_public: bool,
name: str,
scoring_contract: ScoringContractParam,
environment_parameters: Optional[ScenarioEnvironmentParam] | NotGiven = NOT_GIVEN,
@@ -624,7 +641,7 @@ async def update(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
idempotency_key: str | None = None,
- ) -> ScenarioView:
+ ) -> ScenarioUpdateResponse:
"""
Update a Scenario, a repeatable AI coding evaluation test that defines the
starting environment as well as evaluation success criteria.
@@ -632,6 +649,8 @@ async def update(
Args:
input_context: The input context for the Scenario.
+ is_public: Whether this scenario is public.
+
name: Name of the scenario.
scoring_contract: The scoring contract for the Scenario.
@@ -661,6 +680,7 @@ async def update(
body=await async_maybe_transform(
{
"input_context": input_context,
+ "is_public": is_public,
"name": name,
"scoring_contract": scoring_contract,
"environment_parameters": environment_parameters,
@@ -676,7 +696,7 @@ async def update(
timeout=timeout,
idempotency_key=idempotency_key,
),
- cast_to=ScenarioView,
+ cast_to=ScenarioUpdateResponse,
)
def list(
@@ -691,7 +711,7 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[ScenarioView, AsyncScenariosCursorIDPage[ScenarioView]]:
+ ) -> AsyncPaginator[ScenarioListResponse, AsyncScenariosCursorIDPage[ScenarioListResponse]]:
"""List all Scenarios matching filter.
Args:
@@ -713,7 +733,7 @@ def list(
"""
return self._get_api_list(
"/v1/scenarios",
- page=AsyncScenariosCursorIDPage[ScenarioView],
+ page=AsyncScenariosCursorIDPage[ScenarioListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -728,7 +748,7 @@ def list(
scenario_list_params.ScenarioListParams,
),
),
- model=ScenarioView,
+ model=ScenarioListResponse,
)
def list_public(
@@ -743,7 +763,7 @@ def list_public(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[ScenarioView, AsyncScenariosCursorIDPage[ScenarioView]]:
+ ) -> AsyncPaginator[ScenarioListPublicResponse, AsyncScenariosCursorIDPage[ScenarioListPublicResponse]]:
"""
List all public scenarios matching filter.
@@ -764,7 +784,7 @@ def list_public(
"""
return self._get_api_list(
"/v1/scenarios/list_public",
- page=AsyncScenariosCursorIDPage[ScenarioView],
+ page=AsyncScenariosCursorIDPage[ScenarioListPublicResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -779,7 +799,7 @@ def list_public(
scenario_list_public_params.ScenarioListPublicParams,
),
),
- model=ScenarioView,
+ model=ScenarioListPublicResponse,
)
async def start_run(
diff --git a/src/runloop_api_client/resources/scenarios/scorers.py b/src/runloop_api_client/resources/scenarios/scorers.py
index 77f9393d6..c71bb028b 100644
--- a/src/runloop_api_client/resources/scenarios/scorers.py
+++ b/src/runloop_api_client/resources/scenarios/scorers.py
@@ -68,7 +68,7 @@ def create(
Args:
bash_script: Bash script for the custom scorer taking context as a json object
- $RL_TEST_CONTEXT.
+ $RL_SCORER_CONTEXT.
type: Name of the type of custom scorer.
@@ -153,7 +153,7 @@ def update(
Args:
bash_script: Bash script for the custom scorer taking context as a json object
- $RL_TEST_CONTEXT.
+ $RL_SCORER_CONTEXT.
type: Name of the type of custom scorer.
@@ -327,7 +327,7 @@ async def create(
Args:
bash_script: Bash script for the custom scorer taking context as a json object
- $RL_TEST_CONTEXT.
+ $RL_SCORER_CONTEXT.
type: Name of the type of custom scorer.
@@ -412,7 +412,7 @@ async def update(
Args:
bash_script: Bash script for the custom scorer taking context as a json object
- $RL_TEST_CONTEXT.
+ $RL_SCORER_CONTEXT.
type: Name of the type of custom scorer.
diff --git a/src/runloop_api_client/types/__init__.py b/src/runloop_api_client/types/__init__.py
index 31b76176f..88e630982 100644
--- a/src/runloop_api_client/types/__init__.py
+++ b/src/runloop_api_client/types/__init__.py
@@ -9,8 +9,6 @@
)
from .devbox_view import DevboxView as DevboxView
from .input_context import InputContext as InputContext
-from .scenario_view import ScenarioView as ScenarioView
-from .benchmark_view import BenchmarkView as BenchmarkView
from .blueprint_view import BlueprintView as BlueprintView
from .devbox_list_view import DevboxListView as DevboxListView
from .scoring_contract import ScoringContract as ScoringContract
@@ -19,8 +17,6 @@
from .benchmark_run_view import BenchmarkRunView as BenchmarkRunView
from .devbox_list_params import DevboxListParams as DevboxListParams
from .devbox_tunnel_view import DevboxTunnelView as DevboxTunnelView
-from .scenario_list_view import ScenarioListView as ScenarioListView
-from .benchmark_list_view import BenchmarkListView as BenchmarkListView
from .blueprint_build_log import BlueprintBuildLog as BlueprintBuildLog
from .blueprint_list_view import BlueprintListView as BlueprintListView
from .input_context_param import InputContextParam as InputContextParam
@@ -34,15 +30,20 @@
from .blueprint_preview_view import BlueprintPreviewView as BlueprintPreviewView
from .repository_list_params import RepositoryListParams as RepositoryListParams
from .scenario_create_params import ScenarioCreateParams as ScenarioCreateParams
+from .scenario_list_response import ScenarioListResponse as ScenarioListResponse
from .scenario_run_list_view import ScenarioRunListView as ScenarioRunListView
from .scenario_update_params import ScenarioUpdateParams as ScenarioUpdateParams
from .scoring_contract_param import ScoringContractParam as ScoringContractParam
from .scoring_function_param import ScoringFunctionParam as ScoringFunctionParam
from .benchmark_create_params import BenchmarkCreateParams as BenchmarkCreateParams
+from .benchmark_list_response import BenchmarkListResponse as BenchmarkListResponse
from .benchmark_run_list_view import BenchmarkRunListView as BenchmarkRunListView
from .blueprint_create_params import BlueprintCreateParams as BlueprintCreateParams
from .blueprint_preview_params import BlueprintPreviewParams as BlueprintPreviewParams
from .repository_create_params import RepositoryCreateParams as RepositoryCreateParams
+from .scenario_create_response import ScenarioCreateResponse as ScenarioCreateResponse
+from .scenario_update_response import ScenarioUpdateResponse as ScenarioUpdateResponse
+from .benchmark_create_response import BenchmarkCreateResponse as BenchmarkCreateResponse
from .devbox_snapshot_list_view import DevboxSnapshotListView as DevboxSnapshotListView
from .devbox_upload_file_params import DevboxUploadFileParams as DevboxUploadFileParams
from .scenario_start_run_params import ScenarioStartRunParams as ScenarioStartRunParams
@@ -52,6 +53,8 @@
from .repository_connection_view import RepositoryConnectionView as RepositoryConnectionView
from .repository_version_details import RepositoryVersionDetails as RepositoryVersionDetails
from .scenario_environment_param import ScenarioEnvironmentParam as ScenarioEnvironmentParam
+from .scenario_retrieve_response import ScenarioRetrieveResponse as ScenarioRetrieveResponse
+from .benchmark_retrieve_response import BenchmarkRetrieveResponse as BenchmarkRetrieveResponse
from .devbox_create_tunnel_params import DevboxCreateTunnelParams as DevboxCreateTunnelParams
from .devbox_download_file_params import DevboxDownloadFileParams as DevboxDownloadFileParams
from .devbox_execute_async_params import DevboxExecuteAsyncParams as DevboxExecuteAsyncParams
@@ -63,6 +66,8 @@
from .repository_version_list_view import RepositoryVersionListView as RepositoryVersionListView
from .scoring_contract_result_view import ScoringContractResultView as ScoringContractResultView
from .scoring_function_result_view import ScoringFunctionResultView as ScoringFunctionResultView
+from .scenario_list_public_response import ScenarioListPublicResponse as ScenarioListPublicResponse
+from .benchmark_list_public_response import BenchmarkListPublicResponse as BenchmarkListPublicResponse
from .blueprint_build_logs_list_view import BlueprintBuildLogsListView as BlueprintBuildLogsListView
from .devbox_create_ssh_key_response import DevboxCreateSSHKeyResponse as DevboxCreateSSHKeyResponse
from .repository_connection_list_view import RepositoryConnectionListView as RepositoryConnectionListView
diff --git a/src/runloop_api_client/types/benchmark_create_params.py b/src/runloop_api_client/types/benchmark_create_params.py
index 1597e3768..27aabe737 100644
--- a/src/runloop_api_client/types/benchmark_create_params.py
+++ b/src/runloop_api_client/types/benchmark_create_params.py
@@ -9,6 +9,9 @@
class BenchmarkCreateParams(TypedDict, total=False):
+ is_public: Required[bool]
+ """Whether this benchmark is public."""
+
name: Required[str]
"""The name of the Benchmark."""
diff --git a/src/runloop_api_client/types/benchmark_create_response.py b/src/runloop_api_client/types/benchmark_create_response.py
new file mode 100644
index 000000000..b0c62817a
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_create_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["BenchmarkCreateResponse"]
+
+
+class BenchmarkCreateResponse(BaseModel):
+ id: str
+ """The ID of the Benchmark."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the benchmark for organization."""
+
+ name: str
+ """The name of the Benchmark."""
+
+ scenario_ids: List[str] = FieldInfo(alias="scenarioIds")
+ """List of Scenario IDs that make up the benchmark."""
+
+ is_public: Optional[bool] = None
+ """Whether this benchmark is public."""
diff --git a/src/runloop_api_client/types/benchmark_list_public_response.py b/src/runloop_api_client/types/benchmark_list_public_response.py
new file mode 100644
index 000000000..4df643f8d
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_list_public_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["BenchmarkListPublicResponse"]
+
+
+class BenchmarkListPublicResponse(BaseModel):
+ id: str
+ """The ID of the Benchmark."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the benchmark for organization."""
+
+ name: str
+ """The name of the Benchmark."""
+
+ scenario_ids: List[str] = FieldInfo(alias="scenarioIds")
+ """List of Scenario IDs that make up the benchmark."""
+
+ is_public: Optional[bool] = None
+ """Whether this benchmark is public."""
diff --git a/src/runloop_api_client/types/benchmark_view.py b/src/runloop_api_client/types/benchmark_list_response.py
similarity index 71%
rename from src/runloop_api_client/types/benchmark_view.py
rename to src/runloop_api_client/types/benchmark_list_response.py
index 7f2f1e1e3..2578ec19c 100644
--- a/src/runloop_api_client/types/benchmark_view.py
+++ b/src/runloop_api_client/types/benchmark_list_response.py
@@ -1,15 +1,15 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List
+from typing import Dict, List, Optional
from pydantic import Field as FieldInfo
from .._models import BaseModel
-__all__ = ["BenchmarkView"]
+__all__ = ["BenchmarkListResponse"]
-class BenchmarkView(BaseModel):
+class BenchmarkListResponse(BaseModel):
id: str
"""The ID of the Benchmark."""
@@ -21,3 +21,6 @@ class BenchmarkView(BaseModel):
scenario_ids: List[str] = FieldInfo(alias="scenarioIds")
"""List of Scenario IDs that make up the benchmark."""
+
+ is_public: Optional[bool] = None
+ """Whether this benchmark is public."""
diff --git a/src/runloop_api_client/types/benchmark_list_view.py b/src/runloop_api_client/types/benchmark_list_view.py
deleted file mode 100644
index 225002840..000000000
--- a/src/runloop_api_client/types/benchmark_list_view.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .benchmark_view import BenchmarkView
-
-__all__ = ["BenchmarkListView"]
-
-
-class BenchmarkListView(BaseModel):
- benchmarks: List[BenchmarkView]
- """List of Benchmarks matching filter."""
-
- has_more: bool
-
- remaining_count: int
-
- total_count: int
diff --git a/src/runloop_api_client/types/benchmark_retrieve_response.py b/src/runloop_api_client/types/benchmark_retrieve_response.py
new file mode 100644
index 000000000..c704a6209
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_retrieve_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["BenchmarkRetrieveResponse"]
+
+
+class BenchmarkRetrieveResponse(BaseModel):
+ id: str
+ """The ID of the Benchmark."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the benchmark for organization."""
+
+ name: str
+ """The name of the Benchmark."""
+
+ scenario_ids: List[str] = FieldInfo(alias="scenarioIds")
+ """List of Scenario IDs that make up the benchmark."""
+
+ is_public: Optional[bool] = None
+ """Whether this benchmark is public."""
diff --git a/src/runloop_api_client/types/benchmark_run_view.py b/src/runloop_api_client/types/benchmark_run_view.py
index 9bfbd3858..9239985aa 100644
--- a/src/runloop_api_client/types/benchmark_run_view.py
+++ b/src/runloop_api_client/types/benchmark_run_view.py
@@ -3,9 +3,23 @@
from typing import Dict, List, Optional
from typing_extensions import Literal
+from pydantic import Field as FieldInfo
+
from .._models import BaseModel
+from .scoring_contract_result_view import ScoringContractResultView
+
+__all__ = ["BenchmarkRunView", "ScenarioRun"]
+
-__all__ = ["BenchmarkRunView"]
+class ScenarioRun(BaseModel):
+ scenario_id: str
+ """ID of the Scenario that has been run."""
+
+ scoring_result: ScoringContractResultView = FieldInfo(alias="scoringResult")
+ """The scoring result of the ScenarioRun."""
+
+ scenario_run_id: Optional[str] = FieldInfo(alias="scenarioRunId", default=None)
+ """ID of the scenario run."""
class BenchmarkRunView(BaseModel):
@@ -21,6 +35,9 @@ class BenchmarkRunView(BaseModel):
pending_scenarios: List[str]
"""List of Scenarios that need to be completed before benchmark can be completed."""
+ scenario_runs: List[ScenarioRun]
+    """List of Scenarios that have been completed."""
+
start_time_ms: int
"""The time the benchmark run execution started (Unix timestamp milliseconds)."""
diff --git a/src/runloop_api_client/types/blueprint_view.py b/src/runloop_api_client/types/blueprint_view.py
index 0840fff31..a6cd9c412 100644
--- a/src/runloop_api_client/types/blueprint_view.py
+++ b/src/runloop_api_client/types/blueprint_view.py
@@ -22,6 +22,9 @@ class BlueprintView(BaseModel):
parameters: BlueprintBuildParameters
"""The parameters used to create Blueprint."""
+ state: Literal["created", "deleted"]
+ """The state of the Blueprint."""
+
status: Literal["provisioning", "building", "failed", "build_complete"]
"""The status of the Blueprint build."""
diff --git a/src/runloop_api_client/types/scenario_create_params.py b/src/runloop_api_client/types/scenario_create_params.py
index 5bd2f3d90..b311c62b3 100644
--- a/src/runloop_api_client/types/scenario_create_params.py
+++ b/src/runloop_api_client/types/scenario_create_params.py
@@ -16,6 +16,9 @@ class ScenarioCreateParams(TypedDict, total=False):
input_context: Required[InputContextParam]
"""The input context for the Scenario."""
+ is_public: Required[bool]
+ """Whether this scenario is public."""
+
name: Required[str]
"""Name of the scenario."""
diff --git a/src/runloop_api_client/types/scenario_create_response.py b/src/runloop_api_client/types/scenario_create_response.py
new file mode 100644
index 000000000..7ab062f96
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_create_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from .._models import BaseModel
+from .input_context import InputContext
+from .scoring_contract import ScoringContract
+from .scenario_environment import ScenarioEnvironment
+
+__all__ = ["ScenarioCreateResponse"]
+
+
+class ScenarioCreateResponse(BaseModel):
+ id: str
+ """The ID of the Scenario."""
+
+ input_context: InputContext
+ """The input context for the Scenario."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the scenario for organization."""
+
+ name: str
+ """The name of the Scenario."""
+
+ scoring_contract: ScoringContract
+ """The scoring contract for the Scenario."""
+
+ environment: Optional[ScenarioEnvironment] = None
+ """The Environment in which the Scenario is run."""
+
+ is_public: Optional[bool] = None
+ """Whether this scenario is public."""
+
+ reference_output: Optional[str] = None
+ """A string representation of the reference output to solve the scenario.
+
+ Commonly can be the result of a git diff or a sequence of command actions to
+ apply to the environment.
+ """
diff --git a/src/runloop_api_client/types/scenario_list_public_response.py b/src/runloop_api_client/types/scenario_list_public_response.py
new file mode 100644
index 000000000..8fa375956
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_list_public_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from .._models import BaseModel
+from .input_context import InputContext
+from .scoring_contract import ScoringContract
+from .scenario_environment import ScenarioEnvironment
+
+__all__ = ["ScenarioListPublicResponse"]
+
+
+class ScenarioListPublicResponse(BaseModel):
+ id: str
+ """The ID of the Scenario."""
+
+ input_context: InputContext
+ """The input context for the Scenario."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the scenario for organization."""
+
+ name: str
+ """The name of the Scenario."""
+
+ scoring_contract: ScoringContract
+ """The scoring contract for the Scenario."""
+
+ environment: Optional[ScenarioEnvironment] = None
+ """The Environment in which the Scenario is run."""
+
+ is_public: Optional[bool] = None
+ """Whether this scenario is public."""
+
+ reference_output: Optional[str] = None
+ """A string representation of the reference output to solve the scenario.
+
+ Commonly can be the result of a git diff or a sequence of command actions to
+ apply to the environment.
+ """
diff --git a/src/runloop_api_client/types/scenario_view.py b/src/runloop_api_client/types/scenario_list_response.py
similarity index 86%
rename from src/runloop_api_client/types/scenario_view.py
rename to src/runloop_api_client/types/scenario_list_response.py
index 4ba20d9c9..1d7097172 100644
--- a/src/runloop_api_client/types/scenario_view.py
+++ b/src/runloop_api_client/types/scenario_list_response.py
@@ -7,10 +7,10 @@
from .scoring_contract import ScoringContract
from .scenario_environment import ScenarioEnvironment
-__all__ = ["ScenarioView"]
+__all__ = ["ScenarioListResponse"]
-class ScenarioView(BaseModel):
+class ScenarioListResponse(BaseModel):
id: str
"""The ID of the Scenario."""
@@ -29,6 +29,9 @@ class ScenarioView(BaseModel):
environment: Optional[ScenarioEnvironment] = None
"""The Environment in which the Scenario is run."""
+ is_public: Optional[bool] = None
+ """Whether this scenario is public."""
+
reference_output: Optional[str] = None
"""A string representation of the reference output to solve the scenario.
diff --git a/src/runloop_api_client/types/scenario_list_view.py b/src/runloop_api_client/types/scenario_list_view.py
deleted file mode 100644
index b11272649..000000000
--- a/src/runloop_api_client/types/scenario_list_view.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-from .scenario_view import ScenarioView
-
-__all__ = ["ScenarioListView"]
-
-
-class ScenarioListView(BaseModel):
- has_more: bool
-
- remaining_count: int
-
- scenarios: List[ScenarioView]
- """List of Scenarios matching filter."""
-
- total_count: int
diff --git a/src/runloop_api_client/types/scenario_retrieve_response.py b/src/runloop_api_client/types/scenario_retrieve_response.py
new file mode 100644
index 000000000..21297daf1
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_retrieve_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from .._models import BaseModel
+from .input_context import InputContext
+from .scoring_contract import ScoringContract
+from .scenario_environment import ScenarioEnvironment
+
+__all__ = ["ScenarioRetrieveResponse"]
+
+
+class ScenarioRetrieveResponse(BaseModel):
+ id: str
+ """The ID of the Scenario."""
+
+ input_context: InputContext
+ """The input context for the Scenario."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the scenario for organization."""
+
+ name: str
+ """The name of the Scenario."""
+
+ scoring_contract: ScoringContract
+ """The scoring contract for the Scenario."""
+
+ environment: Optional[ScenarioEnvironment] = None
+ """The Environment in which the Scenario is run."""
+
+ is_public: Optional[bool] = None
+ """Whether this scenario is public."""
+
+ reference_output: Optional[str] = None
+ """A string representation of the reference output to solve the scenario.
+
+ Commonly can be the result of a git diff or a sequence of command actions to
+ apply to the environment.
+ """
diff --git a/src/runloop_api_client/types/scenario_run_view.py b/src/runloop_api_client/types/scenario_run_view.py
index 893279409..71c805446 100644
--- a/src/runloop_api_client/types/scenario_run_view.py
+++ b/src/runloop_api_client/types/scenario_run_view.py
@@ -35,7 +35,7 @@ class ScenarioRunView(BaseModel):
"""Optional name of ScenarioRun."""
scoring_contract_result: Optional[ScoringContractResultView] = None
- """The input context for the Scenario."""
+ """The scoring result of the ScenarioRun."""
start_time_ms: Optional[int] = None
"""The time that the scenario started"""
diff --git a/src/runloop_api_client/types/scenario_update_params.py b/src/runloop_api_client/types/scenario_update_params.py
index 1004e7bc8..4c0ef1ad8 100644
--- a/src/runloop_api_client/types/scenario_update_params.py
+++ b/src/runloop_api_client/types/scenario_update_params.py
@@ -16,6 +16,9 @@ class ScenarioUpdateParams(TypedDict, total=False):
input_context: Required[InputContextParam]
"""The input context for the Scenario."""
+ is_public: Required[bool]
+ """Whether this scenario is public."""
+
name: Required[str]
"""Name of the scenario."""
diff --git a/src/runloop_api_client/types/scenario_update_response.py b/src/runloop_api_client/types/scenario_update_response.py
new file mode 100644
index 000000000..dd8617f71
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_update_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+
+from .._models import BaseModel
+from .input_context import InputContext
+from .scoring_contract import ScoringContract
+from .scenario_environment import ScenarioEnvironment
+
+__all__ = ["ScenarioUpdateResponse"]
+
+
+class ScenarioUpdateResponse(BaseModel):
+ id: str
+ """The ID of the Scenario."""
+
+ input_context: InputContext
+ """The input context for the Scenario."""
+
+ metadata: Dict[str, str]
+ """User defined metadata to attach to the scenario for organization."""
+
+ name: str
+ """The name of the Scenario."""
+
+ scoring_contract: ScoringContract
+ """The scoring contract for the Scenario."""
+
+ environment: Optional[ScenarioEnvironment] = None
+ """The Environment in which the Scenario is run."""
+
+ is_public: Optional[bool] = None
+ """Whether this scenario is public."""
+
+ reference_output: Optional[str] = None
+ """A string representation of the reference output to solve the scenario.
+
+ Commonly can be the result of a git diff or a sequence of command actions to
+ apply to the environment.
+ """
diff --git a/src/runloop_api_client/types/scenarios/scorer_create_params.py b/src/runloop_api_client/types/scenarios/scorer_create_params.py
index 4ba0d6d4d..62a4e33f9 100644
--- a/src/runloop_api_client/types/scenarios/scorer_create_params.py
+++ b/src/runloop_api_client/types/scenarios/scorer_create_params.py
@@ -11,7 +11,7 @@ class ScorerCreateParams(TypedDict, total=False):
bash_script: Required[str]
"""
Bash script for the custom scorer taking context as a json object
- $RL_TEST_CONTEXT.
+ $RL_SCORER_CONTEXT.
"""
type: Required[str]
diff --git a/src/runloop_api_client/types/scenarios/scorer_update_params.py b/src/runloop_api_client/types/scenarios/scorer_update_params.py
index 3637f4b05..dcc7816a4 100644
--- a/src/runloop_api_client/types/scenarios/scorer_update_params.py
+++ b/src/runloop_api_client/types/scenarios/scorer_update_params.py
@@ -11,7 +11,7 @@ class ScorerUpdateParams(TypedDict, total=False):
bash_script: Required[str]
"""
Bash script for the custom scorer taking context as a json object
- $RL_TEST_CONTEXT.
+ $RL_SCORER_CONTEXT.
"""
type: Required[str]
diff --git a/src/runloop_api_client/types/shared/launch_parameters.py b/src/runloop_api_client/types/shared/launch_parameters.py
index 6a93c7ef5..ce15b90b0 100644
--- a/src/runloop_api_client/types/shared/launch_parameters.py
+++ b/src/runloop_api_client/types/shared/launch_parameters.py
@@ -38,5 +38,7 @@ class LaunchParameters(BaseModel):
launch_commands: Optional[List[str]] = None
"""Set of commands to be run at launch time, before the entrypoint process is run."""
- resource_size_request: Optional[Literal["SMALL", "MEDIUM", "LARGE", "X_LARGE", "XX_LARGE", "CUSTOM_SIZE"]] = None
+ resource_size_request: Optional[
+ Literal["X_SMALL", "SMALL", "MEDIUM", "LARGE", "X_LARGE", "XX_LARGE", "CUSTOM_SIZE"]
+ ] = None
"""Manual resource configuration for Devbox. If not set, defaults will be used."""
diff --git a/src/runloop_api_client/types/shared_params/launch_parameters.py b/src/runloop_api_client/types/shared_params/launch_parameters.py
index 549b3dbaa..76aa7393f 100644
--- a/src/runloop_api_client/types/shared_params/launch_parameters.py
+++ b/src/runloop_api_client/types/shared_params/launch_parameters.py
@@ -39,5 +39,7 @@ class LaunchParameters(TypedDict, total=False):
launch_commands: Optional[List[str]]
"""Set of commands to be run at launch time, before the entrypoint process is run."""
- resource_size_request: Optional[Literal["SMALL", "MEDIUM", "LARGE", "X_LARGE", "XX_LARGE", "CUSTOM_SIZE"]]
+ resource_size_request: Optional[
+ Literal["X_SMALL", "SMALL", "MEDIUM", "LARGE", "X_LARGE", "XX_LARGE", "CUSTOM_SIZE"]
+ ]
"""Manual resource configuration for Devbox. If not set, defaults will be used."""
diff --git a/tests/api_resources/scenarios/test_scorers.py b/tests/api_resources/scenarios/test_scorers.py
index f848d8798..0ac377ede 100644
--- a/tests/api_resources/scenarios/test_scorers.py
+++ b/tests/api_resources/scenarios/test_scorers.py
@@ -200,7 +200,7 @@ def test_method_validate_with_all_params(self, client: Runloop) -> None:
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
"prebuilt_id": "prebuilt_id",
"snapshot_id": "snapshot_id",
@@ -423,7 +423,7 @@ async def test_method_validate_with_all_params(self, async_client: AsyncRunloop)
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
"prebuilt_id": "prebuilt_id",
"snapshot_id": "snapshot_id",
diff --git a/tests/api_resources/test_benchmarks.py b/tests/api_resources/test_benchmarks.py
index 087dffb1e..7c2d47be9 100644
--- a/tests/api_resources/test_benchmarks.py
+++ b/tests/api_resources/test_benchmarks.py
@@ -10,8 +10,11 @@
from tests.utils import assert_matches_type
from runloop_api_client import Runloop, AsyncRunloop
from runloop_api_client.types import (
- BenchmarkView,
BenchmarkRunView,
+ BenchmarkListResponse,
+ BenchmarkCreateResponse,
+ BenchmarkRetrieveResponse,
+ BenchmarkListPublicResponse,
)
from runloop_api_client.pagination import SyncBenchmarksCursorIDPage, AsyncBenchmarksCursorIDPage
@@ -24,40 +27,44 @@ class TestBenchmarks:
@parametrize
def test_method_create(self, client: Runloop) -> None:
benchmark = client.benchmarks.create(
+ is_public=True,
name="name",
)
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: Runloop) -> None:
benchmark = client.benchmarks.create(
+ is_public=True,
name="name",
metadata={"foo": "string"},
scenario_ids=["string"],
)
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
@parametrize
def test_raw_response_create(self, client: Runloop) -> None:
response = client.benchmarks.with_raw_response.create(
+ is_public=True,
name="name",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
@parametrize
def test_streaming_response_create(self, client: Runloop) -> None:
with client.benchmarks.with_streaming_response.create(
+ is_public=True,
name="name",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -66,7 +73,7 @@ def test_method_retrieve(self, client: Runloop) -> None:
benchmark = client.benchmarks.retrieve(
"id",
)
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkRetrieveResponse, benchmark, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: Runloop) -> None:
@@ -77,7 +84,7 @@ def test_raw_response_retrieve(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkRetrieveResponse, benchmark, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: Runloop) -> None:
@@ -88,7 +95,7 @@ def test_streaming_response_retrieve(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkRetrieveResponse, benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -102,7 +109,7 @@ def test_path_params_retrieve(self, client: Runloop) -> None:
@parametrize
def test_method_list(self, client: Runloop) -> None:
benchmark = client.benchmarks.list()
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: Runloop) -> None:
@@ -110,7 +117,7 @@ def test_method_list_with_all_params(self, client: Runloop) -> None:
limit=0,
starting_after="starting_after",
)
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
@parametrize
def test_raw_response_list(self, client: Runloop) -> None:
@@ -119,7 +126,7 @@ def test_raw_response_list(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
@parametrize
def test_streaming_response_list(self, client: Runloop) -> None:
@@ -128,14 +135,14 @@ def test_streaming_response_list(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_list_public(self, client: Runloop) -> None:
benchmark = client.benchmarks.list_public()
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
@parametrize
def test_method_list_public_with_all_params(self, client: Runloop) -> None:
@@ -143,7 +150,7 @@ def test_method_list_public_with_all_params(self, client: Runloop) -> None:
limit=0,
starting_after="starting_after",
)
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
@parametrize
def test_raw_response_list_public(self, client: Runloop) -> None:
@@ -152,7 +159,7 @@ def test_raw_response_list_public(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
@parametrize
def test_streaming_response_list_public(self, client: Runloop) -> None:
@@ -161,7 +168,7 @@ def test_streaming_response_list_public(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = response.parse()
- assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(SyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -212,40 +219,44 @@ class TestAsyncBenchmarks:
@parametrize
async def test_method_create(self, async_client: AsyncRunloop) -> None:
benchmark = await async_client.benchmarks.create(
+ is_public=True,
name="name",
)
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -> None:
benchmark = await async_client.benchmarks.create(
+ is_public=True,
name="name",
metadata={"foo": "string"},
scenario_ids=["string"],
)
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncRunloop) -> None:
response = await async_client.benchmarks.with_raw_response.create(
+ is_public=True,
name="name",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncRunloop) -> None:
async with async_client.benchmarks.with_streaming_response.create(
+ is_public=True,
name="name",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkCreateResponse, benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -254,7 +265,7 @@ async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
benchmark = await async_client.benchmarks.retrieve(
"id",
)
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkRetrieveResponse, benchmark, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
@@ -265,7 +276,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkRetrieveResponse, benchmark, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
@@ -276,7 +287,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) ->
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(BenchmarkView, benchmark, path=["response"])
+ assert_matches_type(BenchmarkRetrieveResponse, benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -290,7 +301,7 @@ async def test_path_params_retrieve(self, async_client: AsyncRunloop) -> None:
@parametrize
async def test_method_list(self, async_client: AsyncRunloop) -> None:
benchmark = await async_client.benchmarks.list()
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
@@ -298,7 +309,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncRunloop) ->
limit=0,
starting_after="starting_after",
)
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
@@ -307,7 +318,7 @@ async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
@@ -316,14 +327,14 @@ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListResponse], benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_list_public(self, async_client: AsyncRunloop) -> None:
benchmark = await async_client.benchmarks.list_public()
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
@parametrize
async def test_method_list_public_with_all_params(self, async_client: AsyncRunloop) -> None:
@@ -331,7 +342,7 @@ async def test_method_list_public_with_all_params(self, async_client: AsyncRunlo
limit=0,
starting_after="starting_after",
)
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
@parametrize
async def test_raw_response_list_public(self, async_client: AsyncRunloop) -> None:
@@ -340,7 +351,7 @@ async def test_raw_response_list_public(self, async_client: AsyncRunloop) -> Non
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
@parametrize
async def test_streaming_response_list_public(self, async_client: AsyncRunloop) -> None:
@@ -349,7 +360,7 @@ async def test_streaming_response_list_public(self, async_client: AsyncRunloop)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
benchmark = await response.parse()
- assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkView], benchmark, path=["response"])
+ assert_matches_type(AsyncBenchmarksCursorIDPage[BenchmarkListPublicResponse], benchmark, path=["response"])
assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_blueprints.py b/tests/api_resources/test_blueprints.py
index 5a9fc89e9..5e33440e5 100644
--- a/tests/api_resources/test_blueprints.py
+++ b/tests/api_resources/test_blueprints.py
@@ -53,7 +53,7 @@ def test_method_create_with_all_params(self, client: Runloop) -> None:
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
system_setup_commands=["string"],
)
@@ -155,6 +155,44 @@ def test_streaming_response_list(self, client: Runloop) -> None:
assert cast(Any, response.is_closed) is True
+ @parametrize
+ def test_method_delete(self, client: Runloop) -> None:
+ blueprint = client.blueprints.delete(
+ "id",
+ )
+ assert_matches_type(object, blueprint, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: Runloop) -> None:
+ response = client.blueprints.with_raw_response.delete(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ blueprint = response.parse()
+ assert_matches_type(object, blueprint, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: Runloop) -> None:
+ with client.blueprints.with_streaming_response.delete(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ blueprint = response.parse()
+ assert_matches_type(object, blueprint, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.blueprints.with_raw_response.delete(
+ "",
+ )
+
@parametrize
def test_method_logs(self, client: Runloop) -> None:
blueprint = client.blueprints.logs(
@@ -224,7 +262,7 @@ def test_method_preview_with_all_params(self, client: Runloop) -> None:
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
system_setup_commands=["string"],
)
@@ -289,7 +327,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
system_setup_commands=["string"],
)
@@ -391,6 +429,44 @@ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None
assert cast(Any, response.is_closed) is True
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncRunloop) -> None:
+ blueprint = await async_client.blueprints.delete(
+ "id",
+ )
+ assert_matches_type(object, blueprint, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.blueprints.with_raw_response.delete(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ blueprint = await response.parse()
+ assert_matches_type(object, blueprint, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncRunloop) -> None:
+ async with async_client.blueprints.with_streaming_response.delete(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ blueprint = await response.parse()
+ assert_matches_type(object, blueprint, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.blueprints.with_raw_response.delete(
+ "",
+ )
+
@parametrize
async def test_method_logs(self, async_client: AsyncRunloop) -> None:
blueprint = await async_client.blueprints.logs(
@@ -460,7 +536,7 @@ async def test_method_preview_with_all_params(self, async_client: AsyncRunloop)
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
system_setup_commands=["string"],
)
diff --git a/tests/api_resources/test_devboxes.py b/tests/api_resources/test_devboxes.py
index ce6638c01..b0bf4b4a5 100644
--- a/tests/api_resources/test_devboxes.py
+++ b/tests/api_resources/test_devboxes.py
@@ -69,7 +69,7 @@ def test_method_create_with_all_params(self, client: Runloop) -> None:
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
metadata={"foo": "string"},
name="name",
@@ -615,7 +615,7 @@ def test_method_remove_tunnel(self, client: Runloop) -> None:
id="id",
port=0,
)
- assert_matches_type(DevboxTunnelView, devbox, path=["response"])
+ assert_matches_type(object, devbox, path=["response"])
@parametrize
def test_raw_response_remove_tunnel(self, client: Runloop) -> None:
@@ -627,7 +627,7 @@ def test_raw_response_remove_tunnel(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
devbox = response.parse()
- assert_matches_type(DevboxTunnelView, devbox, path=["response"])
+ assert_matches_type(object, devbox, path=["response"])
@parametrize
def test_streaming_response_remove_tunnel(self, client: Runloop) -> None:
@@ -639,7 +639,7 @@ def test_streaming_response_remove_tunnel(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
devbox = response.parse()
- assert_matches_type(DevboxTunnelView, devbox, path=["response"])
+ assert_matches_type(object, devbox, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -944,7 +944,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
metadata={"foo": "string"},
name="name",
@@ -1490,7 +1490,7 @@ async def test_method_remove_tunnel(self, async_client: AsyncRunloop) -> None:
id="id",
port=0,
)
- assert_matches_type(DevboxTunnelView, devbox, path=["response"])
+ assert_matches_type(object, devbox, path=["response"])
@parametrize
async def test_raw_response_remove_tunnel(self, async_client: AsyncRunloop) -> None:
@@ -1502,7 +1502,7 @@ async def test_raw_response_remove_tunnel(self, async_client: AsyncRunloop) -> N
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
devbox = await response.parse()
- assert_matches_type(DevboxTunnelView, devbox, path=["response"])
+ assert_matches_type(object, devbox, path=["response"])
@parametrize
async def test_streaming_response_remove_tunnel(self, async_client: AsyncRunloop) -> None:
@@ -1514,7 +1514,7 @@ async def test_streaming_response_remove_tunnel(self, async_client: AsyncRunloop
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
devbox = await response.parse()
- assert_matches_type(DevboxTunnelView, devbox, path=["response"])
+ assert_matches_type(object, devbox, path=["response"])
assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_scenarios.py b/tests/api_resources/test_scenarios.py
index a19110235..2d6d7048f 100644
--- a/tests/api_resources/test_scenarios.py
+++ b/tests/api_resources/test_scenarios.py
@@ -10,8 +10,12 @@
from tests.utils import assert_matches_type
from runloop_api_client import Runloop, AsyncRunloop
from runloop_api_client.types import (
- ScenarioView,
ScenarioRunView,
+ ScenarioListResponse,
+ ScenarioCreateResponse,
+ ScenarioUpdateResponse,
+ ScenarioRetrieveResponse,
+ ScenarioListPublicResponse,
)
from runloop_api_client.pagination import SyncScenariosCursorIDPage, AsyncScenariosCursorIDPage
@@ -25,6 +29,7 @@ class TestScenarios:
def test_method_create(self, client: Runloop) -> None:
scenario = client.scenarios.create(
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -40,7 +45,7 @@ def test_method_create(self, client: Runloop) -> None:
]
},
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: Runloop) -> None:
@@ -49,6 +54,7 @@ def test_method_create_with_all_params(self, client: Runloop) -> None:
"problem_statement": "problem_statement",
"additional_context": {},
},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -76,7 +82,7 @@ def test_method_create_with_all_params(self, client: Runloop) -> None:
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
"prebuilt_id": "prebuilt_id",
"snapshot_id": "snapshot_id",
@@ -85,12 +91,13 @@ def test_method_create_with_all_params(self, client: Runloop) -> None:
metadata={"foo": "string"},
reference_output="reference_output",
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
@parametrize
def test_raw_response_create(self, client: Runloop) -> None:
response = client.scenarios.with_raw_response.create(
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -110,12 +117,13 @@ def test_raw_response_create(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
@parametrize
def test_streaming_response_create(self, client: Runloop) -> None:
with client.scenarios.with_streaming_response.create(
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -135,7 +143,7 @@ def test_streaming_response_create(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -144,7 +152,7 @@ def test_method_retrieve(self, client: Runloop) -> None:
scenario = client.scenarios.retrieve(
"id",
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioRetrieveResponse, scenario, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: Runloop) -> None:
@@ -155,7 +163,7 @@ def test_raw_response_retrieve(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioRetrieveResponse, scenario, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: Runloop) -> None:
@@ -166,7 +174,7 @@ def test_streaming_response_retrieve(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioRetrieveResponse, scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -182,6 +190,7 @@ def test_method_update(self, client: Runloop) -> None:
scenario = client.scenarios.update(
id="id",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -197,7 +206,7 @@ def test_method_update(self, client: Runloop) -> None:
]
},
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
@parametrize
def test_method_update_with_all_params(self, client: Runloop) -> None:
@@ -207,6 +216,7 @@ def test_method_update_with_all_params(self, client: Runloop) -> None:
"problem_statement": "problem_statement",
"additional_context": {},
},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -234,7 +244,7 @@ def test_method_update_with_all_params(self, client: Runloop) -> None:
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
"prebuilt_id": "prebuilt_id",
"snapshot_id": "snapshot_id",
@@ -243,13 +253,14 @@ def test_method_update_with_all_params(self, client: Runloop) -> None:
metadata={"foo": "string"},
reference_output="reference_output",
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
@parametrize
def test_raw_response_update(self, client: Runloop) -> None:
response = client.scenarios.with_raw_response.update(
id="id",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -269,13 +280,14 @@ def test_raw_response_update(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
@parametrize
def test_streaming_response_update(self, client: Runloop) -> None:
with client.scenarios.with_streaming_response.update(
id="id",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -295,7 +307,7 @@ def test_streaming_response_update(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -305,6 +317,7 @@ def test_path_params_update(self, client: Runloop) -> None:
client.scenarios.with_raw_response.update(
id="",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -324,7 +337,7 @@ def test_path_params_update(self, client: Runloop) -> None:
@parametrize
def test_method_list(self, client: Runloop) -> None:
scenario = client.scenarios.list()
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: Runloop) -> None:
@@ -333,7 +346,7 @@ def test_method_list_with_all_params(self, client: Runloop) -> None:
name="name",
starting_after="starting_after",
)
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
@parametrize
def test_raw_response_list(self, client: Runloop) -> None:
@@ -342,7 +355,7 @@ def test_raw_response_list(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
@parametrize
def test_streaming_response_list(self, client: Runloop) -> None:
@@ -351,14 +364,14 @@ def test_streaming_response_list(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_list_public(self, client: Runloop) -> None:
scenario = client.scenarios.list_public()
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
@parametrize
def test_method_list_public_with_all_params(self, client: Runloop) -> None:
@@ -367,7 +380,7 @@ def test_method_list_public_with_all_params(self, client: Runloop) -> None:
name="name",
starting_after="starting_after",
)
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
@parametrize
def test_raw_response_list_public(self, client: Runloop) -> None:
@@ -376,7 +389,7 @@ def test_raw_response_list_public(self, client: Runloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
@parametrize
def test_streaming_response_list_public(self, client: Runloop) -> None:
@@ -385,7 +398,7 @@ def test_streaming_response_list_public(self, client: Runloop) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = response.parse()
- assert_matches_type(SyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(SyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -438,6 +451,7 @@ class TestAsyncScenarios:
async def test_method_create(self, async_client: AsyncRunloop) -> None:
scenario = await async_client.scenarios.create(
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -453,7 +467,7 @@ async def test_method_create(self, async_client: AsyncRunloop) -> None:
]
},
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -> None:
@@ -462,6 +476,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -
"problem_statement": "problem_statement",
"additional_context": {},
},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -489,7 +504,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
"prebuilt_id": "prebuilt_id",
"snapshot_id": "snapshot_id",
@@ -498,12 +513,13 @@ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -
metadata={"foo": "string"},
reference_output="reference_output",
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncRunloop) -> None:
response = await async_client.scenarios.with_raw_response.create(
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -523,12 +539,13 @@ async def test_raw_response_create(self, async_client: AsyncRunloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncRunloop) -> None:
async with async_client.scenarios.with_streaming_response.create(
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -548,7 +565,7 @@ async def test_streaming_response_create(self, async_client: AsyncRunloop) -> No
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioCreateResponse, scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -557,7 +574,7 @@ async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
scenario = await async_client.scenarios.retrieve(
"id",
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioRetrieveResponse, scenario, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
@@ -568,7 +585,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioRetrieveResponse, scenario, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
@@ -579,7 +596,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) ->
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioRetrieveResponse, scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -595,6 +612,7 @@ async def test_method_update(self, async_client: AsyncRunloop) -> None:
scenario = await async_client.scenarios.update(
id="id",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -610,7 +628,7 @@ async def test_method_update(self, async_client: AsyncRunloop) -> None:
]
},
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncRunloop) -> None:
@@ -620,6 +638,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncRunloop) -
"problem_statement": "problem_statement",
"additional_context": {},
},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -647,7 +666,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncRunloop) -
"custom_gb_memory": 0,
"keep_alive_time_seconds": 0,
"launch_commands": ["string"],
- "resource_size_request": "SMALL",
+ "resource_size_request": "X_SMALL",
},
"prebuilt_id": "prebuilt_id",
"snapshot_id": "snapshot_id",
@@ -656,13 +675,14 @@ async def test_method_update_with_all_params(self, async_client: AsyncRunloop) -
metadata={"foo": "string"},
reference_output="reference_output",
)
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncRunloop) -> None:
response = await async_client.scenarios.with_raw_response.update(
id="id",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -682,13 +702,14 @@ async def test_raw_response_update(self, async_client: AsyncRunloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncRunloop) -> None:
async with async_client.scenarios.with_streaming_response.update(
id="id",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -708,7 +729,7 @@ async def test_streaming_response_update(self, async_client: AsyncRunloop) -> No
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(ScenarioView, scenario, path=["response"])
+ assert_matches_type(ScenarioUpdateResponse, scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -718,6 +739,7 @@ async def test_path_params_update(self, async_client: AsyncRunloop) -> None:
await async_client.scenarios.with_raw_response.update(
id="",
input_context={"problem_statement": "problem_statement"},
+ is_public=True,
name="name",
scoring_contract={
"scoring_function_parameters": [
@@ -737,7 +759,7 @@ async def test_path_params_update(self, async_client: AsyncRunloop) -> None:
@parametrize
async def test_method_list(self, async_client: AsyncRunloop) -> None:
scenario = await async_client.scenarios.list()
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
@@ -746,7 +768,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncRunloop) ->
name="name",
starting_after="starting_after",
)
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
@@ -755,7 +777,7 @@ async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
@@ -764,14 +786,14 @@ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListResponse], scenario, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_list_public(self, async_client: AsyncRunloop) -> None:
scenario = await async_client.scenarios.list_public()
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
@parametrize
async def test_method_list_public_with_all_params(self, async_client: AsyncRunloop) -> None:
@@ -780,7 +802,7 @@ async def test_method_list_public_with_all_params(self, async_client: AsyncRunlo
name="name",
starting_after="starting_after",
)
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
@parametrize
async def test_raw_response_list_public(self, async_client: AsyncRunloop) -> None:
@@ -789,7 +811,7 @@ async def test_raw_response_list_public(self, async_client: AsyncRunloop) -> Non
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
@parametrize
async def test_streaming_response_list_public(self, async_client: AsyncRunloop) -> None:
@@ -798,7 +820,7 @@ async def test_streaming_response_list_public(self, async_client: AsyncRunloop)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
scenario = await response.parse()
- assert_matches_type(AsyncScenariosCursorIDPage[ScenarioView], scenario, path=["response"])
+ assert_matches_type(AsyncScenariosCursorIDPage[ScenarioListPublicResponse], scenario, path=["response"])
assert cast(Any, response.is_closed) is True
diff --git a/tests/test_client.py b/tests/test_client.py
index 638f65a36..2453b22ac 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1709,7 +1709,7 @@ def test_get_platform(self) -> None:
import threading
from runloop_api_client._utils import asyncify
- from runloop_api_client._base_client import get_platform
+ from runloop_api_client._base_client import get_platform
async def test_main() -> None:
result = await asyncify(get_platform)()
diff --git a/tests/test_transform.py b/tests/test_transform.py
index f30ab41ee..bc6003b5f 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -8,7 +8,7 @@
import pytest
-from runloop_api_client._types import Base64FileInput
+from runloop_api_client._types import NOT_GIVEN, Base64FileInput
from runloop_api_client._utils import (
PropertyInfo,
transform as _transform,
@@ -432,3 +432,22 @@ async def test_base64_file_input(use_async: bool) -> None:
assert await transform({"foo": io.BytesIO(b"Hello, world!")}, TypedDictBase64Input, use_async) == {
"foo": "SGVsbG8sIHdvcmxkIQ=="
} # type: ignore[comparison-overlap]
+
+
+@parametrize
+@pytest.mark.asyncio
+async def test_transform_skipping(use_async: bool) -> None:
+ # lists of ints are left as-is
+ data = [1, 2, 3]
+ assert await transform(data, List[int], use_async) is data
+
+ # iterables of ints are converted to a list
+ data = iter([1, 2, 3])
+ assert await transform(data, Iterable[int], use_async) == [1, 2, 3]
+
+
+@parametrize
+@pytest.mark.asyncio
+async def test_strips_notgiven(use_async: bool) -> None:
+ assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
+ assert await transform({"foo_bar": NOT_GIVEN}, Foo1, use_async) == {}