diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 4e205789d..2a8f4ffdd 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.3.0-alpha"
+ ".": "1.3.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 553c2ca6f..5eb10a624 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 108
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runloop-ai%2Frunloop-e583f34fdcdc18499c8692e8eb8021f6163201f0f77206934c712c319a674d43.yml
-openapi_spec_hash: f2fb3f7f5c1f62d3dc397cd02cd1007a
-config_hash: 42959fa2708796cc2f83937278dde733
+configured_endpoints: 103
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runloop-ai%2Frunloop-5359067a857aa94f69bae0d3311856be3e637da067fdc9dbf8bd26fe476efbd8.yml
+openapi_spec_hash: 5227ef7c306d5226c3aee8932b2e8c6a
+config_hash: cb43d4ca9e64d5a099199d6818d70539
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7599c1c80..be0b78508 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,15 +1,12 @@
# Changelog
-## 1.3.0-alpha (2026-01-20)
+## 1.3.0 (2026-01-22)
-Full Changelog: [v1.2.0...v1.3.0-alpha](https://github.com/runloopai/api-client-python/compare/v1.2.0...v1.3.0-alpha)
-
-### ⚠ BREAKING CHANGES
-
-* remove support for pydantic-v1, pydantic-v2 is now default ([#710](https://github.com/runloopai/api-client-python/issues/710))
+Full Changelog: [v1.3.0-alpha...v1.3.0](https://github.com/runloopai/api-client-python/compare/v1.3.0-alpha...v1.3.0)
### Features
+* **network-policy:** add network policies ([#720](https://github.com/runloopai/api-client-python/issues/720)) ([9b31116](https://github.com/runloopai/api-client-python/commit/9b311168fcda2554f8ea03c9698c1e284dc1bdba))
* **benchmarks:** add `update_scenarios` method to benchmarks resource ([71ec221](https://github.com/runloopai/api-client-python/commit/71ec221f1d0cad7aac33c0299d3f8b1aa97d0741))
* **blueprint:** Set cilium network policy on blueprint build ([#7006](https://github.com/runloopai/api-client-python/issues/7006)) ([95c62ac](https://github.com/runloopai/api-client-python/commit/95c62ac1e2689acdd83cac2dc85f1a639490d982))
* **client:** add support for binary request streaming ([d6c2200](https://github.com/runloopai/api-client-python/commit/d6c22008f7cd8a6a07055f29bcb4eca4914ec5e0))
@@ -31,6 +28,10 @@ Full Changelog: [v1.2.0...v1.3.0-alpha](https://github.com/runloopai/api-client-
### Chores
+* fix stainless spec / remove deprecated benchmark/runs ([#7074](https://github.com/runloopai/api-client-python/issues/7074)) ([3ed7d80](https://github.com/runloopai/api-client-python/commit/3ed7d806edc349b4bb4cfb8e0e54b78f272a6d75))
+* fix tests and linting ([#719](https://github.com/runloopai/api-client-python/issues/719)) ([74db814](https://github.com/runloopai/api-client-python/commit/74db81455af44b94896e8adc7a6e24ba71b9e2ef))
+* update network policy create params descriptions ([#7069](https://github.com/runloopai/api-client-python/issues/7069)) ([b151692](https://github.com/runloopai/api-client-python/commit/b15169283ff344c447b203b6bc3589f05c400ea7))
+* remove support for pydantic-v1, pydantic-v2 is now default ([#710](https://github.com/runloopai/api-client-python/issues/710))
* add documentation url to pypi project page ([#711](https://github.com/runloopai/api-client-python/issues/711)) ([7afb327](https://github.com/runloopai/api-client-python/commit/7afb32731842ebee4f479837959ccac856bd5e85))
* add missing docstrings ([a198632](https://github.com/runloopai/api-client-python/commit/a198632f6a3936bcf5b5b4f4e6324461c4853893))
* **devbox:** Remove network policy from devbox view; use launch params instead ([#7025](https://github.com/runloopai/api-client-python/issues/7025)) ([d53af14](https://github.com/runloopai/api-client-python/commit/d53af14f6f55144859c8257d936a115989563b6d))
diff --git a/README.md b/README.md
index 0467fb6f0..14e808a10 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ The REST API documentation can be found on
```sh
# install from PyPI
-pip install --pre runloop_api_client
+pip install runloop_api_client
```
## Usage
@@ -96,7 +96,7 @@ You can enable this by installing `aiohttp`:
```sh
# install from PyPI
-pip install --pre runloop_api_client[aiohttp]
+pip install runloop_api_client[aiohttp]
```
Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
diff --git a/api.md b/api.md
index e712ddbb2..32d35cfbd 100644
--- a/api.md
+++ b/api.md
@@ -20,8 +20,6 @@ Types:
```python
from runloop_api_client.types import (
BenchmarkCreateParameters,
- BenchmarkRunListView,
- BenchmarkRunView,
BenchmarkScenarioUpdateParameters,
BenchmarkUpdateParameters,
BenchmarkView,
@@ -32,31 +30,21 @@ from runloop_api_client.types import (
Methods:
-- client.benchmarks.create(\*\*params) -> BenchmarkView
-- client.benchmarks.retrieve(id) -> BenchmarkView
-- client.benchmarks.update(id, \*\*params) -> BenchmarkView
-- client.benchmarks.list(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkView]
-- client.benchmarks.definitions(id, \*\*params) -> ScenarioDefinitionListView
-- client.benchmarks.list_public(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkView]
-- client.benchmarks.start_run(\*\*params) -> BenchmarkRunView
-- client.benchmarks.update_scenarios(id, \*\*params) -> BenchmarkView
-
-## Runs
-
-Methods:
-
-- client.benchmarks.runs.retrieve(id) -> BenchmarkRunView
-- client.benchmarks.runs.list(\*\*params) -> SyncBenchmarkRunsCursorIDPage[BenchmarkRunView]
-- client.benchmarks.runs.cancel(id) -> BenchmarkRunView
-- client.benchmarks.runs.complete(id) -> BenchmarkRunView
-- client.benchmarks.runs.list_scenario_runs(id, \*\*params) -> SyncBenchmarkRunsCursorIDPage[ScenarioRunView]
+- client.benchmarks.create(\*\*params) -> BenchmarkView
+- client.benchmarks.retrieve(id) -> BenchmarkView
+- client.benchmarks.update(id, \*\*params) -> BenchmarkView
+- client.benchmarks.list(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkView]
+- client.benchmarks.definitions(id, \*\*params) -> ScenarioDefinitionListView
+- client.benchmarks.list_public(\*\*params) -> SyncBenchmarksCursorIDPage[BenchmarkView]
+- client.benchmarks.start_run(\*\*params) -> BenchmarkRunView
+- client.benchmarks.update_scenarios(id, \*\*params) -> BenchmarkView
# BenchmarkRuns
Types:
```python
-from runloop_api_client.types import BenchmarkRunListView, BenchmarkRunView, ScenarioRunListView
+from runloop_api_client.types import BenchmarkRunListView, BenchmarkRunView
```
Methods:
diff --git a/pyproject.toml b/pyproject.toml
index 138a7b3a5..5007d5e66 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "runloop_api_client"
-version = "1.3.0-alpha"
+version = "1.3.0"
description = "The official Python library for the runloop API"
dynamic = ["readme"]
license = "MIT"
diff --git a/src/runloop_api_client/_client.py b/src/runloop_api_client/_client.py
index 603a43dc2..557170af6 100644
--- a/src/runloop_api_client/_client.py
+++ b/src/runloop_api_client/_client.py
@@ -46,13 +46,13 @@
from .resources.agents import AgentsResource, AsyncAgentsResource
from .resources.objects import ObjectsResource, AsyncObjectsResource
from .resources.secrets import SecretsResource, AsyncSecretsResource
+ from .resources.benchmarks import BenchmarksResource, AsyncBenchmarksResource
from .resources.blueprints import BlueprintsResource, AsyncBlueprintsResource
from .resources.repositories import RepositoriesResource, AsyncRepositoriesResource
from .resources.benchmark_runs import BenchmarkRunsResource, AsyncBenchmarkRunsResource
from .resources.network_policies import NetworkPoliciesResource, AsyncNetworkPoliciesResource
from .resources.devboxes.devboxes import DevboxesResource, AsyncDevboxesResource
from .resources.scenarios.scenarios import ScenariosResource, AsyncScenariosResource
- from .resources.benchmarks.benchmarks import BenchmarksResource, AsyncBenchmarksResource
__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Runloop", "AsyncRunloop", "Client", "AsyncClient"]
diff --git a/src/runloop_api_client/_version.py b/src/runloop_api_client/_version.py
index ad6d43f7c..c746bdc5e 100644
--- a/src/runloop_api_client/_version.py
+++ b/src/runloop_api_client/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "runloop_api_client"
-__version__ = "1.3.0-alpha" # x-release-please-version
+__version__ = "1.3.0" # x-release-please-version
diff --git a/src/runloop_api_client/resources/benchmarks/benchmarks.py b/src/runloop_api_client/resources/benchmarks.py
similarity index 96%
rename from src/runloop_api_client/resources/benchmarks/benchmarks.py
rename to src/runloop_api_client/resources/benchmarks.py
index 9d9a30b5d..d23992bd2 100644
--- a/src/runloop_api_client/resources/benchmarks/benchmarks.py
+++ b/src/runloop_api_client/resources/benchmarks.py
@@ -6,15 +6,7 @@
import httpx
-from .runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from ...types import (
+from ..types import (
benchmark_list_params,
benchmark_create_params,
benchmark_update_params,
@@ -23,31 +15,27 @@
benchmark_list_public_params,
benchmark_update_scenarios_params,
)
-from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
+from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ...pagination import SyncBenchmarksCursorIDPage, AsyncBenchmarksCursorIDPage
-from ..._base_client import AsyncPaginator, make_request_options
-from ...types.benchmark_view import BenchmarkView
-from ...types.benchmark_run_view import BenchmarkRunView
-from ...types.shared_params.run_profile import RunProfile
-from ...types.scenario_definition_list_view import ScenarioDefinitionListView
+from ..pagination import SyncBenchmarksCursorIDPage, AsyncBenchmarksCursorIDPage
+from .._base_client import AsyncPaginator, make_request_options
+from ..types.benchmark_view import BenchmarkView
+from ..types.benchmark_run_view import BenchmarkRunView
+from ..types.shared_params.run_profile import RunProfile
+from ..types.scenario_definition_list_view import ScenarioDefinitionListView
__all__ = ["BenchmarksResource", "AsyncBenchmarksResource"]
class BenchmarksResource(SyncAPIResource):
- @cached_property
- def runs(self) -> RunsResource:
- return RunsResource(self._client)
-
@cached_property
def with_raw_response(self) -> BenchmarksResourceWithRawResponse:
"""
@@ -513,10 +501,6 @@ def update_scenarios(
class AsyncBenchmarksResource(AsyncAPIResource):
- @cached_property
- def runs(self) -> AsyncRunsResource:
- return AsyncRunsResource(self._client)
-
@cached_property
def with_raw_response(self) -> AsyncBenchmarksResourceWithRawResponse:
"""
@@ -1010,10 +994,6 @@ def __init__(self, benchmarks: BenchmarksResource) -> None:
benchmarks.update_scenarios,
)
- @cached_property
- def runs(self) -> RunsResourceWithRawResponse:
- return RunsResourceWithRawResponse(self._benchmarks.runs)
-
class AsyncBenchmarksResourceWithRawResponse:
def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
@@ -1044,10 +1024,6 @@ def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
benchmarks.update_scenarios,
)
- @cached_property
- def runs(self) -> AsyncRunsResourceWithRawResponse:
- return AsyncRunsResourceWithRawResponse(self._benchmarks.runs)
-
class BenchmarksResourceWithStreamingResponse:
def __init__(self, benchmarks: BenchmarksResource) -> None:
@@ -1078,10 +1054,6 @@ def __init__(self, benchmarks: BenchmarksResource) -> None:
benchmarks.update_scenarios,
)
- @cached_property
- def runs(self) -> RunsResourceWithStreamingResponse:
- return RunsResourceWithStreamingResponse(self._benchmarks.runs)
-
class AsyncBenchmarksResourceWithStreamingResponse:
def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
@@ -1111,7 +1083,3 @@ def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
self.update_scenarios = async_to_streamed_response_wrapper(
benchmarks.update_scenarios,
)
-
- @cached_property
- def runs(self) -> AsyncRunsResourceWithStreamingResponse:
- return AsyncRunsResourceWithStreamingResponse(self._benchmarks.runs)
diff --git a/src/runloop_api_client/resources/benchmarks/__init__.py b/src/runloop_api_client/resources/benchmarks/__init__.py
deleted file mode 100644
index f34bcd900..000000000
--- a/src/runloop_api_client/resources/benchmarks/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .runs import (
- RunsResource,
- AsyncRunsResource,
- RunsResourceWithRawResponse,
- AsyncRunsResourceWithRawResponse,
- RunsResourceWithStreamingResponse,
- AsyncRunsResourceWithStreamingResponse,
-)
-from .benchmarks import (
- BenchmarksResource,
- AsyncBenchmarksResource,
- BenchmarksResourceWithRawResponse,
- AsyncBenchmarksResourceWithRawResponse,
- BenchmarksResourceWithStreamingResponse,
- AsyncBenchmarksResourceWithStreamingResponse,
-)
-
-__all__ = [
- "RunsResource",
- "AsyncRunsResource",
- "RunsResourceWithRawResponse",
- "AsyncRunsResourceWithRawResponse",
- "RunsResourceWithStreamingResponse",
- "AsyncRunsResourceWithStreamingResponse",
- "BenchmarksResource",
- "AsyncBenchmarksResource",
- "BenchmarksResourceWithRawResponse",
- "AsyncBenchmarksResourceWithRawResponse",
- "BenchmarksResourceWithStreamingResponse",
- "AsyncBenchmarksResourceWithStreamingResponse",
-]
diff --git a/src/runloop_api_client/resources/benchmarks/runs.py b/src/runloop_api_client/resources/benchmarks/runs.py
deleted file mode 100644
index fe085ede7..000000000
--- a/src/runloop_api_client/resources/benchmarks/runs.py
+++ /dev/null
@@ -1,646 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...pagination import SyncBenchmarkRunsCursorIDPage, AsyncBenchmarkRunsCursorIDPage
-from ..._base_client import AsyncPaginator, make_request_options
-from ...types.benchmarks import run_list_params, run_list_scenario_runs_params
-from ...types.scenario_run_view import ScenarioRunView
-from ...types.benchmark_run_view import BenchmarkRunView
-
-__all__ = ["RunsResource", "AsyncRunsResource"]
-
-
-class RunsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> RunsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
- """
- return RunsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RunsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
- """
- return RunsResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- def retrieve(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BenchmarkRunView:
- """
- Get a BenchmarkRun given ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._get(
- f"/v1/benchmarks/runs/{id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- def list(
- self,
- *,
- benchmark_id: str | Omit = omit,
- limit: int | Omit = omit,
- name: str | Omit = omit,
- starting_after: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> SyncBenchmarkRunsCursorIDPage[BenchmarkRunView]:
- """
- List all BenchmarkRuns matching filter.
-
- Args:
- benchmark_id: The Benchmark ID to filter by.
-
- limit: The limit of items to return. Default is 20. Max is 5000.
-
- name: Filter by name
-
- starting_after: Load the next page of data starting after the item with the given ID.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get_api_list(
- "/v1/benchmarks/runs",
- page=SyncBenchmarkRunsCursorIDPage[BenchmarkRunView],
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "benchmark_id": benchmark_id,
- "limit": limit,
- "name": name,
- "starting_after": starting_after,
- },
- run_list_params.RunListParams,
- ),
- ),
- model=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- def cancel(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- idempotency_key: str | None = None,
- ) -> BenchmarkRunView:
- """
- Cancel a currently running Benchmark run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
-
- idempotency_key: Specify a custom idempotency key for this request
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._post(
- f"/v1/benchmarks/runs/{id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- idempotency_key=idempotency_key,
- ),
- cast_to=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- def complete(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- idempotency_key: str | None = None,
- ) -> BenchmarkRunView:
- """
- Complete a currently running BenchmarkRun.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
-
- idempotency_key: Specify a custom idempotency key for this request
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._post(
- f"/v1/benchmarks/runs/{id}/complete",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- idempotency_key=idempotency_key,
- ),
- cast_to=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- def list_scenario_runs(
- self,
- id: str,
- *,
- limit: int | Omit = omit,
- starting_after: str | Omit = omit,
- state: Literal["running", "scoring", "scored", "completed", "canceled", "timeout", "failed"] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> SyncBenchmarkRunsCursorIDPage[ScenarioRunView]:
- """
- List started scenario runs for a benchmark run.
-
- Args:
- limit: The limit of items to return. Default is 20. Max is 5000.
-
- starting_after: Load the next page of data starting after the item with the given ID.
-
- state: Filter by Scenario Run state
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._get_api_list(
- f"/v1/benchmarks/runs/{id}/scenario_runs",
- page=SyncBenchmarkRunsCursorIDPage[ScenarioRunView],
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "limit": limit,
- "starting_after": starting_after,
- "state": state,
- },
- run_list_scenario_runs_params.RunListScenarioRunsParams,
- ),
- ),
- model=ScenarioRunView,
- )
-
-
-class AsyncRunsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncRunsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRunsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
- """
- return AsyncRunsResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- async def retrieve(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BenchmarkRunView:
- """
- Get a BenchmarkRun given ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return await self._get(
- f"/v1/benchmarks/runs/{id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- def list(
- self,
- *,
- benchmark_id: str | Omit = omit,
- limit: int | Omit = omit,
- name: str | Omit = omit,
- starting_after: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> AsyncPaginator[BenchmarkRunView, AsyncBenchmarkRunsCursorIDPage[BenchmarkRunView]]:
- """
- List all BenchmarkRuns matching filter.
-
- Args:
- benchmark_id: The Benchmark ID to filter by.
-
- limit: The limit of items to return. Default is 20. Max is 5000.
-
- name: Filter by name
-
- starting_after: Load the next page of data starting after the item with the given ID.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get_api_list(
- "/v1/benchmarks/runs",
- page=AsyncBenchmarkRunsCursorIDPage[BenchmarkRunView],
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "benchmark_id": benchmark_id,
- "limit": limit,
- "name": name,
- "starting_after": starting_after,
- },
- run_list_params.RunListParams,
- ),
- ),
- model=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def cancel(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- idempotency_key: str | None = None,
- ) -> BenchmarkRunView:
- """
- Cancel a currently running Benchmark run.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
-
- idempotency_key: Specify a custom idempotency key for this request
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return await self._post(
- f"/v1/benchmarks/runs/{id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- idempotency_key=idempotency_key,
- ),
- cast_to=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def complete(
- self,
- id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- idempotency_key: str | None = None,
- ) -> BenchmarkRunView:
- """
- Complete a currently running BenchmarkRun.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
-
- idempotency_key: Specify a custom idempotency key for this request
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return await self._post(
- f"/v1/benchmarks/runs/{id}/complete",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- idempotency_key=idempotency_key,
- ),
- cast_to=BenchmarkRunView,
- )
-
- @typing_extensions.deprecated("deprecated")
- def list_scenario_runs(
- self,
- id: str,
- *,
- limit: int | Omit = omit,
- starting_after: str | Omit = omit,
- state: Literal["running", "scoring", "scored", "completed", "canceled", "timeout", "failed"] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> AsyncPaginator[ScenarioRunView, AsyncBenchmarkRunsCursorIDPage[ScenarioRunView]]:
- """
- List started scenario runs for a benchmark run.
-
- Args:
- limit: The limit of items to return. Default is 20. Max is 5000.
-
- starting_after: Load the next page of data starting after the item with the given ID.
-
- state: Filter by Scenario Run state
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not id:
- raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
- return self._get_api_list(
- f"/v1/benchmarks/runs/{id}/scenario_runs",
- page=AsyncBenchmarkRunsCursorIDPage[ScenarioRunView],
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "limit": limit,
- "starting_after": starting_after,
- "state": state,
- },
- run_list_scenario_runs_params.RunListScenarioRunsParams,
- ),
- ),
- model=ScenarioRunView,
- )
-
-
-class RunsResourceWithRawResponse:
- def __init__(self, runs: RunsResource) -> None:
- self._runs = runs
-
- self.retrieve = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- runs.retrieve, # pyright: ignore[reportDeprecated],
- )
- )
- self.list = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- runs.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.cancel = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- runs.cancel, # pyright: ignore[reportDeprecated],
- )
- )
- self.complete = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- runs.complete, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_scenario_runs = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- runs.list_scenario_runs, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncRunsResourceWithRawResponse:
- def __init__(self, runs: AsyncRunsResource) -> None:
- self._runs = runs
-
- self.retrieve = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- runs.retrieve, # pyright: ignore[reportDeprecated],
- )
- )
- self.list = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- runs.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.cancel = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- runs.cancel, # pyright: ignore[reportDeprecated],
- )
- )
- self.complete = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- runs.complete, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_scenario_runs = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- runs.list_scenario_runs, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class RunsResourceWithStreamingResponse:
- def __init__(self, runs: RunsResource) -> None:
- self._runs = runs
-
- self.retrieve = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- runs.retrieve, # pyright: ignore[reportDeprecated],
- )
- )
- self.list = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- runs.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.cancel = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- runs.cancel, # pyright: ignore[reportDeprecated],
- )
- )
- self.complete = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- runs.complete, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_scenario_runs = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- runs.list_scenario_runs, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncRunsResourceWithStreamingResponse:
- def __init__(self, runs: AsyncRunsResource) -> None:
- self._runs = runs
-
- self.retrieve = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- runs.retrieve, # pyright: ignore[reportDeprecated],
- )
- )
- self.list = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- runs.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.cancel = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- runs.cancel, # pyright: ignore[reportDeprecated],
- )
- )
- self.complete = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- runs.complete, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_scenario_runs = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- runs.list_scenario_runs, # pyright: ignore[reportDeprecated],
- )
- )
diff --git a/src/runloop_api_client/resources/network_policies.py b/src/runloop_api_client/resources/network_policies.py
index 5b96bf4b2..1cda5b42d 100644
--- a/src/runloop_api_client/resources/network_policies.py
+++ b/src/runloop_api_client/resources/network_policies.py
@@ -69,12 +69,14 @@ def create(
name: The human-readable name for the NetworkPolicy. Must be unique within the
account.
- allow_all: If true, all egress traffic is allowed (ALLOW_ALL policy). Defaults to false.
-
- allow_devbox_to_devbox: If true, allows traffic between the account's own devboxes via tunnels. Defaults
+ allow_all: (Optional) If true, all egress traffic is allowed (ALLOW_ALL policy). Defaults
to false.
- allowed_hostnames: DNS-based allow list with wildcard support. Examples: ['github.com',
+ allow_devbox_to_devbox: (Optional) If true, allows traffic between the account's own devboxes via
+ tunnels. Defaults to false. If allow_all is true, this is automatically set to
+ true.
+
+ allowed_hostnames: (Optional) DNS-based allow list with wildcard support. Examples: ['github.com',
'*.npmjs.org'].
description: Optional description for the NetworkPolicy.
@@ -353,12 +355,14 @@ async def create(
name: The human-readable name for the NetworkPolicy. Must be unique within the
account.
- allow_all: If true, all egress traffic is allowed (ALLOW_ALL policy). Defaults to false.
-
- allow_devbox_to_devbox: If true, allows traffic between the account's own devboxes via tunnels. Defaults
+ allow_all: (Optional) If true, all egress traffic is allowed (ALLOW_ALL policy). Defaults
to false.
- allowed_hostnames: DNS-based allow list with wildcard support. Examples: ['github.com',
+ allow_devbox_to_devbox: (Optional) If true, allows traffic between the account's own devboxes via
+ tunnels. Defaults to false. If allow_all is true, this is automatically set to
+ true.
+
+ allowed_hostnames: (Optional) DNS-based allow list with wildcard support. Examples: ['github.com',
'*.npmjs.org'].
description: Optional description for the NetworkPolicy.
diff --git a/src/runloop_api_client/sdk/_types.py b/src/runloop_api_client/sdk/_types.py
index 1b5f984ac..d367f927e 100644
--- a/src/runloop_api_client/sdk/_types.py
+++ b/src/runloop_api_client/sdk/_types.py
@@ -28,16 +28,15 @@
NetworkPolicyUpdateParams,
DevboxReadFileContentsParams,
DevboxWriteFileContentsParams,
+ BenchmarkRunListScenarioRunsParams,
)
from .._types import Body, Query, Headers, Timeout, NotGiven
from ..lib.polling import PollingConfig
from ..types.devboxes import DiskSnapshotListParams, DiskSnapshotUpdateParams
from ..types.scenarios import ScorerListParams, ScorerCreateParams, ScorerUpdateParams, ScorerValidateParams
-from ..types.benchmarks import RunListScenarioRunsParams
from ..types.devbox_create_params import DevboxBaseCreateParams
from ..types.scenario_start_run_params import ScenarioStartRunBaseParams
from ..types.benchmark_start_run_params import BenchmarkSelfStartRunParams
-from ..types.benchmarks.run_list_params import RunSelfListParams
from ..types.devbox_execute_async_params import DevboxNiceExecuteAsyncParams
LogCallback = Callable[[str], None]
@@ -233,11 +232,18 @@ class SDKBenchmarkStartRunParams(BenchmarkSelfStartRunParams, LongRequestOptions
pass
-class SDKBenchmarkListRunsParams(RunSelfListParams, BaseRequestOptions):
- pass
+class SDKBenchmarkListRunsParams(BaseRequestOptions, total=False):
+ limit: int
+ """The limit of items to return. Default is 20. Max is 5000."""
+
+ name: str
+ """Filter by name"""
+
+ starting_after: str
+ """Load the next page of data starting after the item with the given ID."""
-class SDKBenchmarkRunListScenarioRunsParams(RunListScenarioRunsParams, BaseRequestOptions):
+class SDKBenchmarkRunListScenarioRunsParams(BenchmarkRunListScenarioRunsParams, BaseRequestOptions):
pass
diff --git a/src/runloop_api_client/types/benchmarks/__init__.py b/src/runloop_api_client/types/benchmarks/__init__.py
deleted file mode 100644
index 2fb29daa0..000000000
--- a/src/runloop_api_client/types/benchmarks/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .run_list_params import RunListParams as RunListParams
-from .run_list_scenario_runs_params import RunListScenarioRunsParams as RunListScenarioRunsParams
diff --git a/src/runloop_api_client/types/benchmarks/run_list_params.py b/src/runloop_api_client/types/benchmarks/run_list_params.py
deleted file mode 100644
index a75e1b592..000000000
--- a/src/runloop_api_client/types/benchmarks/run_list_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["RunListParams"]
-
-
-# Split into separate params so that OO SDK list_runs params can omit the benchmark_id
-# Neither of these params are exposed to the user, only the derived SDKBenchmarkListRunsParams
-class RunSelfListParams(TypedDict, total=False):
- limit: int
- """The limit of items to return. Default is 20. Max is 5000."""
-
- name: str
- """Filter by name"""
-
- starting_after: str
- """Load the next page of data starting after the item with the given ID."""
-
-
-class RunListParams(RunSelfListParams, total=False):
- benchmark_id: str
- """The Benchmark ID to filter by."""
diff --git a/src/runloop_api_client/types/benchmarks/run_list_scenario_runs_params.py b/src/runloop_api_client/types/benchmarks/run_list_scenario_runs_params.py
deleted file mode 100644
index ddce6aa4a..000000000
--- a/src/runloop_api_client/types/benchmarks/run_list_scenario_runs_params.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["RunListScenarioRunsParams"]
-
-
-class RunListScenarioRunsParams(TypedDict, total=False):
- limit: int
- """The limit of items to return. Default is 20. Max is 5000."""
-
- starting_after: str
- """Load the next page of data starting after the item with the given ID."""
-
- state: Literal["running", "scoring", "scored", "completed", "canceled", "timeout", "failed"]
- """Filter by Scenario Run state"""
diff --git a/src/runloop_api_client/types/network_policy_create_params.py b/src/runloop_api_client/types/network_policy_create_params.py
index b70c6c8f9..e1a718a48 100644
--- a/src/runloop_api_client/types/network_policy_create_params.py
+++ b/src/runloop_api_client/types/network_policy_create_params.py
@@ -18,16 +18,20 @@ class NetworkPolicyCreateParams(TypedDict, total=False):
"""
allow_all: Optional[bool]
- """If true, all egress traffic is allowed (ALLOW_ALL policy). Defaults to false."""
-
- allow_devbox_to_devbox: Optional[bool]
- """If true, allows traffic between the account's own devboxes via tunnels.
+ """(Optional) If true, all egress traffic is allowed (ALLOW_ALL policy).
Defaults to false.
"""
+ allow_devbox_to_devbox: Optional[bool]
+ """
+ (Optional) If true, allows traffic between the account's own devboxes via
+ tunnels. Defaults to false. If allow_all is true, this is automatically set to
+ true.
+ """
+
allowed_hostnames: Optional[SequenceNotStr[str]]
- """DNS-based allow list with wildcard support.
+ """(Optional) DNS-based allow list with wildcard support.
Examples: ['github.com', '*.npmjs.org'].
"""
diff --git a/tests/api_resources/benchmarks/__init__.py b/tests/api_resources/benchmarks/__init__.py
deleted file mode 100644
index fd8019a9a..000000000
--- a/tests/api_resources/benchmarks/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/benchmarks/test_runs.py b/tests/api_resources/benchmarks/test_runs.py
deleted file mode 100644
index a610f1fc4..000000000
--- a/tests/api_resources/benchmarks/test_runs.py
+++ /dev/null
@@ -1,477 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from runloop_api_client import Runloop, AsyncRunloop
-from runloop_api_client.types import ScenarioRunView, BenchmarkRunView
-from runloop_api_client.pagination import SyncBenchmarkRunsCursorIDPage, AsyncBenchmarkRunsCursorIDPage
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestRuns:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.retrieve(
- "id",
- )
-
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.benchmarks.runs.with_raw_response.retrieve(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with client.benchmarks.runs.with_streaming_response.retrieve(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.benchmarks.runs.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- def test_method_list(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.list()
-
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- @parametrize
- def test_method_list_with_all_params(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.list(
- benchmark_id="benchmark_id",
- limit=0,
- name="name",
- starting_after="starting_after",
- )
-
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.benchmarks.runs.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with client.benchmarks.runs.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_cancel(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.cancel(
- "id",
- )
-
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- def test_raw_response_cancel(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.benchmarks.runs.with_raw_response.cancel(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- def test_streaming_response_cancel(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with client.benchmarks.runs.with_streaming_response.cancel(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_cancel(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.benchmarks.runs.with_raw_response.cancel(
- "",
- )
-
- @parametrize
- def test_method_complete(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.complete(
- "id",
- )
-
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- def test_raw_response_complete(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.benchmarks.runs.with_raw_response.complete(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- def test_streaming_response_complete(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with client.benchmarks.runs.with_streaming_response.complete(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_complete(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.benchmarks.runs.with_raw_response.complete(
- "",
- )
-
- @parametrize
- def test_method_list_scenario_runs(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.list_scenario_runs(
- id="id",
- )
-
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- @parametrize
- def test_method_list_scenario_runs_with_all_params(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = client.benchmarks.runs.list_scenario_runs(
- id="id",
- limit=0,
- starting_after="starting_after",
- state="running",
- )
-
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- @parametrize
- def test_raw_response_list_scenario_runs(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.benchmarks.runs.with_raw_response.list_scenario_runs(
- id="id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = response.parse()
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- @parametrize
- def test_streaming_response_list_scenario_runs(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with client.benchmarks.runs.with_streaming_response.list_scenario_runs(
- id="id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = response.parse()
- assert_matches_type(SyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_list_scenario_runs(self, client: Runloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- client.benchmarks.runs.with_raw_response.list_scenario_runs(
- id="",
- )
-
-
-class TestAsyncRuns:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.retrieve(
- "id",
- )
-
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.benchmarks.runs.with_raw_response.retrieve(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.benchmarks.runs.with_streaming_response.retrieve(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.benchmarks.runs.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.list()
-
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.list(
- benchmark_id="benchmark_id",
- limit=0,
- name="name",
- starting_after="starting_after",
- )
-
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.benchmarks.runs.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.benchmarks.runs.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[BenchmarkRunView], run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_cancel(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.cancel(
- "id",
- )
-
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.benchmarks.runs.with_raw_response.cancel(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.benchmarks.runs.with_streaming_response.cancel(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.benchmarks.runs.with_raw_response.cancel(
- "",
- )
-
- @parametrize
- async def test_method_complete(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.complete(
- "id",
- )
-
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- async def test_raw_response_complete(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.benchmarks.runs.with_raw_response.complete(
- "id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- @parametrize
- async def test_streaming_response_complete(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.benchmarks.runs.with_streaming_response.complete(
- "id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(BenchmarkRunView, run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_complete(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.benchmarks.runs.with_raw_response.complete(
- "",
- )
-
- @parametrize
- async def test_method_list_scenario_runs(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.list_scenario_runs(
- id="id",
- )
-
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- @parametrize
- async def test_method_list_scenario_runs_with_all_params(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- run = await async_client.benchmarks.runs.list_scenario_runs(
- id="id",
- limit=0,
- starting_after="starting_after",
- state="running",
- )
-
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- @parametrize
- async def test_raw_response_list_scenario_runs(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.benchmarks.runs.with_raw_response.list_scenario_runs(
- id="id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- run = await response.parse()
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- @parametrize
- async def test_streaming_response_list_scenario_runs(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.benchmarks.runs.with_streaming_response.list_scenario_runs(
- id="id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- run = await response.parse()
- assert_matches_type(AsyncBenchmarkRunsCursorIDPage[ScenarioRunView], run, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_list_scenario_runs(self, async_client: AsyncRunloop) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
- await async_client.benchmarks.runs.with_raw_response.list_scenario_runs(
- id="",
- )
diff --git a/tests/smoketests/sdk/test_async_blueprint.py b/tests/smoketests/sdk/test_async_blueprint.py
index 453e26f6e..04856070b 100644
--- a/tests/smoketests/sdk/test_async_blueprint.py
+++ b/tests/smoketests/sdk/test_async_blueprint.py
@@ -22,7 +22,7 @@ async def test_blueprint_create_basic(self, async_sdk_client: AsyncRunloopSDK) -
name = unique_name("sdk-async-blueprint-basic")
blueprint = await async_sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y curl",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y curl",
)
try:
@@ -43,7 +43,7 @@ async def test_blueprint_create_with_system_setup(self, async_sdk_client: AsyncR
name = unique_name("sdk-async-blueprint-setup")
blueprint = await async_sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
system_setup_commands=[
"sudo apt-get update",
"sudo apt-get install -y wget",
@@ -64,7 +64,7 @@ async def test_blueprint_get_info(self, async_sdk_client: AsyncRunloopSDK) -> No
name = unique_name("sdk-async-blueprint-info")
blueprint = await async_sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04\nRUN echo 'test'",
+ dockerfile="FROM ubuntu:22.04\nRUN echo 'test'",
)
try:
@@ -81,7 +81,7 @@ async def test_blueprint_delete(self, async_sdk_client: AsyncRunloopSDK) -> None
"""Test deleting a blueprint."""
blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-async-blueprint-delete"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
blueprint_id = blueprint.id
@@ -102,7 +102,7 @@ async def test_blueprint_with_base_blueprint(self, async_sdk_client: AsyncRunloo
# Create base blueprint
base_blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-async-blueprint-base"),
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y curl",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y curl",
)
try:
@@ -135,7 +135,7 @@ async def test_blueprint_with_metadata(self, async_sdk_client: AsyncRunloopSDK)
blueprint = await async_sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
metadata=metadata,
)
@@ -168,7 +168,7 @@ async def test_get_blueprint_by_id(self, async_sdk_client: AsyncRunloopSDK) -> N
# Create a blueprint
created = await async_sdk_client.blueprint.create(
name=unique_name("sdk-async-blueprint-retrieve"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
try:
@@ -190,7 +190,7 @@ async def test_list_blueprints_by_name(self, async_sdk_client: AsyncRunloopSDK)
# Create a blueprint with a specific name
blueprint = await async_sdk_client.blueprint.create(
name=blueprint_name,
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
try:
@@ -216,7 +216,7 @@ async def test_create_devbox_from_blueprint(self, async_sdk_client: AsyncRunloop
# Create a blueprint
blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-async-blueprint-for-devbox"),
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y python3",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y python3",
)
try:
@@ -255,7 +255,7 @@ async def test_blueprint_invalid_dockerfile(self, async_sdk_client: AsyncRunloop
try:
blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-async-blueprint-invalid"),
- dockerfile="FROM ubuntu:20.04\nRUN INVALID_COMMAND_THAT_DOES_NOT_EXIST",
+ dockerfile="FROM ubuntu:22.04\nRUN INVALID_COMMAND_THAT_DOES_NOT_EXIST",
)
# If it somehow succeeds, verify it failed during build
info = await blueprint.get_info()
diff --git a/tests/smoketests/sdk/test_async_devbox.py b/tests/smoketests/sdk/test_async_devbox.py
index 1a0ddc7a0..4f2e6603c 100644
--- a/tests/smoketests/sdk/test_async_devbox.py
+++ b/tests/smoketests/sdk/test_async_devbox.py
@@ -365,6 +365,8 @@ async def test_resume_async(self, async_sdk_client: AsyncRunloopSDK) -> None:
name=unique_name("sdk-async-devbox-resume-async"),
launch_parameters={"resource_size_request": "SMALL", "keep_alive_time_seconds": 60 * 5},
)
+ # wait for devbox to be running
+ await devbox.await_running(polling_config=PollingConfig(timeout_seconds=120.0, interval_seconds=5.0))
try:
# Suspend the devbox
@@ -385,7 +387,7 @@ async def test_resume_async(self, async_sdk_client: AsyncRunloopSDK) -> None:
# Status might still be suspended or transitioning
info_after_resume = await devbox.get_info()
- assert info_after_resume.status in ["suspended", "running", "starting"]
+ assert info_after_resume.status in ["suspended", "running", "starting", "provisioning"]
# Now wait for running state explicitly
running_info = await devbox.await_running(
@@ -466,7 +468,7 @@ async def test_create_from_blueprint_id(self, async_sdk_client: AsyncRunloopSDK)
# First create a blueprint
blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-async-blueprint-for-devbox"),
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y curl",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y curl",
)
try:
@@ -494,7 +496,7 @@ async def test_create_from_blueprint_name(self, async_sdk_client: AsyncRunloopSD
# Create blueprint
blueprint = await async_sdk_client.blueprint.create(
name=blueprint_name,
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y wget",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y wget",
)
try:
diff --git a/tests/smoketests/sdk/test_async_network_policy.py b/tests/smoketests/sdk/test_async_network_policy.py
index d8fef917f..08e099434 100644
--- a/tests/smoketests/sdk/test_async_network_policy.py
+++ b/tests/smoketests/sdk/test_async_network_policy.py
@@ -88,13 +88,9 @@ async def test_network_policy_delete(self, async_sdk_client: AsyncRunloopSDK) ->
allowed_hostnames=["example.com"],
)
- policy_id = network_policy.id
result = await network_policy.delete()
assert result is not None
- # Verify it's deleted
- info = await async_sdk_client.api.network_policies.retrieve(policy_id)
- assert info.id == policy_id
class TestAsyncNetworkPolicyCreationVariations:
diff --git a/tests/smoketests/sdk/test_async_scenario.py b/tests/smoketests/sdk/test_async_scenario.py
index b0abf6a41..4faeae958 100644
--- a/tests/smoketests/sdk/test_async_scenario.py
+++ b/tests/smoketests/sdk/test_async_scenario.py
@@ -207,7 +207,7 @@ async def test_scenario_builder_with_blueprint(self, async_sdk_client: AsyncRunl
"""Test creating/updating a scenario from a blueprint."""
blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-smoketest-async-scenario-bp"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
builder = (
@@ -233,7 +233,7 @@ async def test_scenario_builder_with_snapshot(self, async_sdk_client: AsyncRunlo
# Create blueprint -> devbox -> snapshot chain
blueprint = await async_sdk_client.blueprint.create(
name=unique_name("sdk-smoketest-async-scenario-snap-bp"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
devbox = await async_sdk_client.devbox.create(blueprint_id=blueprint.id)
snapshot = await devbox.snapshot_disk(name=unique_name("sdk-smoketest-async-scenario-snap"))
diff --git a/tests/smoketests/sdk/test_blueprint.py b/tests/smoketests/sdk/test_blueprint.py
index 3ff6adb9e..0d55d0dce 100644
--- a/tests/smoketests/sdk/test_blueprint.py
+++ b/tests/smoketests/sdk/test_blueprint.py
@@ -22,7 +22,7 @@ def test_blueprint_create_basic(self, sdk_client: RunloopSDK) -> None:
name = unique_name("sdk-blueprint-basic")
blueprint = sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y curl",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y curl",
)
try:
@@ -43,7 +43,7 @@ def test_blueprint_create_with_system_setup(self, sdk_client: RunloopSDK) -> Non
name = unique_name("sdk-blueprint-setup")
blueprint = sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
system_setup_commands=[
"sudo apt-get update",
"sudo apt-get install -y wget",
@@ -64,7 +64,7 @@ def test_blueprint_get_info(self, sdk_client: RunloopSDK) -> None:
name = unique_name("sdk-blueprint-info")
blueprint = sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04\nRUN echo 'test'",
+ dockerfile="FROM ubuntu:22.04\nRUN echo 'test'",
)
try:
@@ -81,7 +81,7 @@ def test_blueprint_delete(self, sdk_client: RunloopSDK) -> None:
"""Test deleting a blueprint."""
blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-blueprint-delete"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
blueprint_id = blueprint.id
@@ -102,7 +102,7 @@ def test_blueprint_with_base_blueprint(self, sdk_client: RunloopSDK) -> None:
# Create base blueprint
base_blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-blueprint-base"),
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y curl",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y curl",
)
try:
@@ -135,7 +135,7 @@ def test_blueprint_with_metadata(self, sdk_client: RunloopSDK) -> None:
blueprint = sdk_client.blueprint.create(
name=name,
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
metadata=metadata,
)
@@ -168,7 +168,7 @@ def test_get_blueprint_by_id(self, sdk_client: RunloopSDK) -> None:
# Create a blueprint
created = sdk_client.blueprint.create(
name=unique_name("sdk-blueprint-retrieve"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
try:
@@ -190,7 +190,7 @@ def test_list_blueprints_by_name(self, sdk_client: RunloopSDK) -> None:
# Create a blueprint with a specific name
blueprint = sdk_client.blueprint.create(
name=blueprint_name,
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
try:
@@ -216,7 +216,7 @@ def test_create_devbox_from_blueprint(self, sdk_client: RunloopSDK) -> None:
# Create a blueprint
blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-blueprint-for-devbox"),
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y python3",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y python3",
)
try:
@@ -255,7 +255,7 @@ def test_blueprint_invalid_dockerfile(self, sdk_client: RunloopSDK) -> None:
try:
blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-blueprint-invalid"),
- dockerfile="FROM ubuntu:20.04\nRUN INVALID_COMMAND_THAT_DOES_NOT_EXIST",
+ dockerfile="FROM ubuntu:22.04\nRUN INVALID_COMMAND_THAT_DOES_NOT_EXIST",
)
# If it somehow succeeds, verify it failed during build
info = blueprint.get_info()
diff --git a/tests/smoketests/sdk/test_devbox.py b/tests/smoketests/sdk/test_devbox.py
index 77a9cdff3..87d5fc4d6 100644
--- a/tests/smoketests/sdk/test_devbox.py
+++ b/tests/smoketests/sdk/test_devbox.py
@@ -364,6 +364,8 @@ def test_resume_async(self, sdk_client: RunloopSDK) -> None:
name=unique_name("sdk-devbox-resume-async"),
launch_parameters={"resource_size_request": "SMALL", "keep_alive_time_seconds": 60 * 5},
)
+ # Wait for the devbox to be fully running before suspending it below
+ devbox.await_running(polling_config=PollingConfig(timeout_seconds=120.0, interval_seconds=5.0))
try:
# Suspend the devbox
@@ -382,7 +384,7 @@ def test_resume_async(self, sdk_client: RunloopSDK) -> None:
# Status might still be suspended or transitioning
info_after_resume = devbox.get_info()
- assert info_after_resume.status in ["suspended", "running", "starting"]
+ assert info_after_resume.status in ["suspended", "running", "starting", "provisioning"]
# Now wait for running state explicitly
running_info = devbox.await_running(
@@ -463,7 +465,7 @@ def test_create_from_blueprint_id(self, sdk_client: RunloopSDK) -> None:
# First create a blueprint
blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-blueprint-for-devbox"),
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y curl",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y curl",
)
try:
@@ -491,7 +493,7 @@ def test_create_from_blueprint_name(self, sdk_client: RunloopSDK) -> None:
# Create blueprint
blueprint = sdk_client.blueprint.create(
name=blueprint_name,
- dockerfile="FROM ubuntu:20.04\nRUN apt-get update && apt-get install -y wget",
+ dockerfile="FROM ubuntu:22.04\nRUN apt-get update && apt-get install -y wget",
)
try:
diff --git a/tests/smoketests/sdk/test_network_policy.py b/tests/smoketests/sdk/test_network_policy.py
index 9457c338a..f398b74a9 100644
--- a/tests/smoketests/sdk/test_network_policy.py
+++ b/tests/smoketests/sdk/test_network_policy.py
@@ -88,13 +88,9 @@ def test_network_policy_delete(self, sdk_client: RunloopSDK) -> None:
allowed_hostnames=["example.com"],
)
- policy_id = network_policy.id
result = network_policy.delete()
assert result is not None
- # Verify it's deleted
- info = sdk_client.api.network_policies.retrieve(policy_id)
- assert info.id == policy_id
class TestNetworkPolicyCreationVariations:
diff --git a/tests/smoketests/sdk/test_scenario.py b/tests/smoketests/sdk/test_scenario.py
index 4128cfa29..694f5800a 100644
--- a/tests/smoketests/sdk/test_scenario.py
+++ b/tests/smoketests/sdk/test_scenario.py
@@ -206,7 +206,7 @@ def test_scenario_builder_with_blueprint(self, sdk_client: RunloopSDK) -> None:
"""Test creating/updating a scenario from a blueprint."""
blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-smoketest-scenario-bp"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
builder = (
@@ -232,7 +232,7 @@ def test_scenario_builder_with_snapshot(self, sdk_client: RunloopSDK) -> None:
# Create blueprint -> devbox -> snapshot chain
blueprint = sdk_client.blueprint.create(
name=unique_name("sdk-smoketest-scenario-snap-bp"),
- dockerfile="FROM ubuntu:20.04",
+ dockerfile="FROM ubuntu:22.04",
)
devbox = sdk_client.devbox.create(blueprint_id=blueprint.id)
snapshot = devbox.snapshot_disk(name=unique_name("sdk-smoketest-scenario-snap"))