diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index d52d2b974..a26ebfc1e 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.13.0"
+ ".": "0.14.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 311ea3267..16278e6a4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 46
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runloop-ai%2Frunloop-15f23a9e2e014d2e5d4d72c39ee31c5556060fb37d1490f308ecce5aeb41d5c8.yml
+configured_endpoints: 61
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/runloop-ai%2Frunloop-9644e0fb22c7f4f55c131356129c4da627ec8aa3a46f5305893e601b961fe1fc.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b3c38fee7..5ee106314 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 0.14.0 (2025-01-29)
+
+Full Changelog: [v0.13.0...v0.14.0](https://github.com/runloopai/api-client-python/compare/v0.13.0...v0.14.0)
+
+### Features
+
+* **api:** api update ([#501](https://github.com/runloopai/api-client-python/issues/501)) ([cb1f8ac](https://github.com/runloopai/api-client-python/commit/cb1f8ac9e6ea44e33db8a4739cbb325a982840f4))
+* **api:** api update ([#502](https://github.com/runloopai/api-client-python/issues/502)) ([22d84a8](https://github.com/runloopai/api-client-python/commit/22d84a8727e147d26d4bfd82ca26a1ac8d32365f))
+
+
+### Bug Fixes
+
+* **tests:** make test_get_platform less flaky ([#500](https://github.com/runloopai/api-client-python/issues/500)) ([394d017](https://github.com/runloopai/api-client-python/commit/394d0172530ea1284173d3fd04a3a8a1f1dd78f9))
+
+
+### Chores
+
+* **internal:** codegen related update ([#497](https://github.com/runloopai/api-client-python/issues/497)) ([207a88e](https://github.com/runloopai/api-client-python/commit/207a88ed4bed2b87e10d15c49002661f1cf84f1c))
+
+
+### Documentation
+
+* **raw responses:** fix duplicate `the` ([#499](https://github.com/runloopai/api-client-python/issues/499)) ([80e393c](https://github.com/runloopai/api-client-python/commit/80e393c1c87319b6886cef1c2806c2ce8e467a54))
+
## 0.13.0 (2025-01-15)
Full Changelog: [v0.12.0...v0.13.0](https://github.com/runloopai/api-client-python/compare/v0.12.0...v0.13.0)
diff --git a/api.md b/api.md
index be9c20e21..2af2e46fa 100644
--- a/api.md
+++ b/api.md
@@ -4,6 +4,36 @@
from runloop_api_client.types import AfterIdle, CodeMountParameters, LaunchParameters
```
+# Benchmarks
+
+Types:
+
+```python
+from runloop_api_client.types import (
+ BenchmarkCreateParameters,
+ BenchmarkListView,
+ BenchmarkRunListView,
+ BenchmarkRunView,
+ BenchmarkView,
+ StartBenchmarkRunParameters,
+)
+```
+
+Methods:
+
+- client.benchmarks.create(\*\*params) -> BenchmarkView
+- client.benchmarks.retrieve(id) -> BenchmarkView
+- client.benchmarks.list(\*\*params) -> BenchmarkListView
+- client.benchmarks.start_run(\*\*params) -> BenchmarkRunView
+
+## Runs
+
+Methods:
+
+- client.benchmarks.runs.retrieve(id) -> BenchmarkRunView
+- client.benchmarks.runs.list(\*\*params) -> BenchmarkRunListView
+- client.benchmarks.runs.complete(id) -> BenchmarkRunView
+
# Blueprints
Types:
@@ -173,6 +203,43 @@ Methods:
- client.devboxes.executions.execute_sync(id, \*\*params) -> DevboxExecutionDetailView
- client.devboxes.executions.kill(execution_id, \*, devbox_id) -> DevboxAsyncExecutionDetailView
+# Scenarios
+
+Types:
+
+```python
+from runloop_api_client.types import (
+ InputContextParameters,
+ ScenarioCreateParameters,
+ ScenarioEnvironmentParameters,
+ ScenarioListView,
+ ScenarioRunListView,
+ ScenarioRunView,
+ ScenarioView,
+ ScoringContractParameters,
+ ScoringContractResultView,
+ ScoringFunctionParameters,
+ ScoringFunctionResultView,
+ StartScenarioRunParameters,
+)
+```
+
+Methods:
+
+- client.scenarios.create(\*\*params) -> ScenarioView
+- client.scenarios.retrieve(id) -> ScenarioView
+- client.scenarios.list(\*\*params) -> ScenarioListView
+- client.scenarios.start_run(\*\*params) -> ScenarioRunView
+
+## Runs
+
+Methods:
+
+- client.scenarios.runs.retrieve(id) -> ScenarioRunView
+- client.scenarios.runs.list(\*\*params) -> ScenarioRunListView
+- client.scenarios.runs.complete(id) -> ScenarioRunView
+- client.scenarios.runs.score(id) -> ScenarioRunView
+
# Repositories
Types:
diff --git a/mypy.ini b/mypy.ini
index f2a97a0a9..0a62aa9d6 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -41,7 +41,7 @@ cache_fine_grained = True
# ```
# Changing this codegen to make mypy happy would increase complexity
# and would not be worth it.
-disable_error_code = func-returns-value
+disable_error_code = func-returns-value,overload-cannot-match
# https://github.com/python/mypy/issues/12162
[mypy.overrides]
diff --git a/pyproject.toml b/pyproject.toml
index bca77395d..525fc8fbd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "runloop_api_client"
-version = "0.13.0"
+version = "0.14.0"
description = "The official Python library for the runloop API"
dynamic = ["readme"]
license = "MIT"
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 384992ae9..f18f7063b 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -49,7 +49,7 @@ markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
-mypy==1.13.0
+mypy==1.14.1
mypy-extensions==1.0.0
# via mypy
nest-asyncio==1.6.0
@@ -69,7 +69,7 @@ pydantic-core==2.27.1
# via pydantic
pygments==2.18.0
# via rich
-pyright==1.1.390
+pyright==1.1.392.post0
pytest==8.3.3
# via pytest-asyncio
pytest-asyncio==0.24.0
diff --git a/src/runloop_api_client/_client.py b/src/runloop_api_client/_client.py
index 2c254b387..8fc0cb6f8 100644
--- a/src/runloop_api_client/_client.py
+++ b/src/runloop_api_client/_client.py
@@ -33,13 +33,17 @@
AsyncAPIClient,
)
from .resources.devboxes import devboxes
+from .resources.scenarios import scenarios
+from .resources.benchmarks import benchmarks
__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Runloop", "AsyncRunloop", "Client", "AsyncClient"]
class Runloop(SyncAPIClient):
+ benchmarks: benchmarks.BenchmarksResource
blueprints: blueprints.BlueprintsResource
devboxes: devboxes.DevboxesResource
+ scenarios: scenarios.ScenariosResource
repositories: repositories.RepositoriesResource
with_raw_response: RunloopWithRawResponse
with_streaming_response: RunloopWithStreamedResponse
@@ -100,8 +104,10 @@ def __init__(
self._idempotency_header = "x-request-id"
+ self.benchmarks = benchmarks.BenchmarksResource(self)
self.blueprints = blueprints.BlueprintsResource(self)
self.devboxes = devboxes.DevboxesResource(self)
+ self.scenarios = scenarios.ScenariosResource(self)
self.repositories = repositories.RepositoriesResource(self)
self.with_raw_response = RunloopWithRawResponse(self)
self.with_streaming_response = RunloopWithStreamedResponse(self)
@@ -212,8 +218,10 @@ def _make_status_error(
class AsyncRunloop(AsyncAPIClient):
+ benchmarks: benchmarks.AsyncBenchmarksResource
blueprints: blueprints.AsyncBlueprintsResource
devboxes: devboxes.AsyncDevboxesResource
+ scenarios: scenarios.AsyncScenariosResource
repositories: repositories.AsyncRepositoriesResource
with_raw_response: AsyncRunloopWithRawResponse
with_streaming_response: AsyncRunloopWithStreamedResponse
@@ -274,8 +282,10 @@ def __init__(
self._idempotency_header = "x-request-id"
+ self.benchmarks = benchmarks.AsyncBenchmarksResource(self)
self.blueprints = blueprints.AsyncBlueprintsResource(self)
self.devboxes = devboxes.AsyncDevboxesResource(self)
+ self.scenarios = scenarios.AsyncScenariosResource(self)
self.repositories = repositories.AsyncRepositoriesResource(self)
self.with_raw_response = AsyncRunloopWithRawResponse(self)
self.with_streaming_response = AsyncRunloopWithStreamedResponse(self)
@@ -387,29 +397,37 @@ def _make_status_error(
class RunloopWithRawResponse:
def __init__(self, client: Runloop) -> None:
+ self.benchmarks = benchmarks.BenchmarksResourceWithRawResponse(client.benchmarks)
self.blueprints = blueprints.BlueprintsResourceWithRawResponse(client.blueprints)
self.devboxes = devboxes.DevboxesResourceWithRawResponse(client.devboxes)
+ self.scenarios = scenarios.ScenariosResourceWithRawResponse(client.scenarios)
self.repositories = repositories.RepositoriesResourceWithRawResponse(client.repositories)
class AsyncRunloopWithRawResponse:
def __init__(self, client: AsyncRunloop) -> None:
+ self.benchmarks = benchmarks.AsyncBenchmarksResourceWithRawResponse(client.benchmarks)
self.blueprints = blueprints.AsyncBlueprintsResourceWithRawResponse(client.blueprints)
self.devboxes = devboxes.AsyncDevboxesResourceWithRawResponse(client.devboxes)
+ self.scenarios = scenarios.AsyncScenariosResourceWithRawResponse(client.scenarios)
self.repositories = repositories.AsyncRepositoriesResourceWithRawResponse(client.repositories)
class RunloopWithStreamedResponse:
def __init__(self, client: Runloop) -> None:
+ self.benchmarks = benchmarks.BenchmarksResourceWithStreamingResponse(client.benchmarks)
self.blueprints = blueprints.BlueprintsResourceWithStreamingResponse(client.blueprints)
self.devboxes = devboxes.DevboxesResourceWithStreamingResponse(client.devboxes)
+ self.scenarios = scenarios.ScenariosResourceWithStreamingResponse(client.scenarios)
self.repositories = repositories.RepositoriesResourceWithStreamingResponse(client.repositories)
class AsyncRunloopWithStreamedResponse:
def __init__(self, client: AsyncRunloop) -> None:
+ self.benchmarks = benchmarks.AsyncBenchmarksResourceWithStreamingResponse(client.benchmarks)
self.blueprints = blueprints.AsyncBlueprintsResourceWithStreamingResponse(client.blueprints)
self.devboxes = devboxes.AsyncDevboxesResourceWithStreamingResponse(client.devboxes)
+ self.scenarios = scenarios.AsyncScenariosResourceWithStreamingResponse(client.scenarios)
self.repositories = repositories.AsyncRepositoriesResourceWithStreamingResponse(client.repositories)
diff --git a/src/runloop_api_client/_response.py b/src/runloop_api_client/_response.py
index 1d55ca296..d765ed407 100644
--- a/src/runloop_api_client/_response.py
+++ b/src/runloop_api_client/_response.py
@@ -210,7 +210,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
return cast(R, response)
- if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
+ if (
+ inspect.isclass(
+ origin # pyright: ignore[reportUnknownArgumentType]
+ )
+ and not issubclass(origin, BaseModel)
+ and issubclass(origin, pydantic.BaseModel)
+ ):
raise TypeError(
"Pydantic models must subclass our base model type, e.g. `from runloop_api_client import BaseModel`"
)
diff --git a/src/runloop_api_client/_version.py b/src/runloop_api_client/_version.py
index f053467f5..9ac0bb875 100644
--- a/src/runloop_api_client/_version.py
+++ b/src/runloop_api_client/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "runloop_api_client"
-__version__ = "0.13.0" # x-release-please-version
+__version__ = "0.14.0" # x-release-please-version
diff --git a/src/runloop_api_client/resources/__init__.py b/src/runloop_api_client/resources/__init__.py
index 72fe76dc1..2c103b15e 100644
--- a/src/runloop_api_client/resources/__init__.py
+++ b/src/runloop_api_client/resources/__init__.py
@@ -8,6 +8,22 @@
DevboxesResourceWithStreamingResponse,
AsyncDevboxesResourceWithStreamingResponse,
)
+from .scenarios import (
+ ScenariosResource,
+ AsyncScenariosResource,
+ ScenariosResourceWithRawResponse,
+ AsyncScenariosResourceWithRawResponse,
+ ScenariosResourceWithStreamingResponse,
+ AsyncScenariosResourceWithStreamingResponse,
+)
+from .benchmarks import (
+ BenchmarksResource,
+ AsyncBenchmarksResource,
+ BenchmarksResourceWithRawResponse,
+ AsyncBenchmarksResourceWithRawResponse,
+ BenchmarksResourceWithStreamingResponse,
+ AsyncBenchmarksResourceWithStreamingResponse,
+)
from .blueprints import (
BlueprintsResource,
AsyncBlueprintsResource,
@@ -26,6 +42,12 @@
)
__all__ = [
+ "BenchmarksResource",
+ "AsyncBenchmarksResource",
+ "BenchmarksResourceWithRawResponse",
+ "AsyncBenchmarksResourceWithRawResponse",
+ "BenchmarksResourceWithStreamingResponse",
+ "AsyncBenchmarksResourceWithStreamingResponse",
"BlueprintsResource",
"AsyncBlueprintsResource",
"BlueprintsResourceWithRawResponse",
@@ -38,6 +60,12 @@
"AsyncDevboxesResourceWithRawResponse",
"DevboxesResourceWithStreamingResponse",
"AsyncDevboxesResourceWithStreamingResponse",
+ "ScenariosResource",
+ "AsyncScenariosResource",
+ "ScenariosResourceWithRawResponse",
+ "AsyncScenariosResourceWithRawResponse",
+ "ScenariosResourceWithStreamingResponse",
+ "AsyncScenariosResourceWithStreamingResponse",
"RepositoriesResource",
"AsyncRepositoriesResource",
"RepositoriesResourceWithRawResponse",
diff --git a/src/runloop_api_client/resources/benchmarks/__init__.py b/src/runloop_api_client/resources/benchmarks/__init__.py
new file mode 100644
index 000000000..f34bcd900
--- /dev/null
+++ b/src/runloop_api_client/resources/benchmarks/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .runs import (
+ RunsResource,
+ AsyncRunsResource,
+ RunsResourceWithRawResponse,
+ AsyncRunsResourceWithRawResponse,
+ RunsResourceWithStreamingResponse,
+ AsyncRunsResourceWithStreamingResponse,
+)
+from .benchmarks import (
+ BenchmarksResource,
+ AsyncBenchmarksResource,
+ BenchmarksResourceWithRawResponse,
+ AsyncBenchmarksResourceWithRawResponse,
+ BenchmarksResourceWithStreamingResponse,
+ AsyncBenchmarksResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "RunsResource",
+ "AsyncRunsResource",
+ "RunsResourceWithRawResponse",
+ "AsyncRunsResourceWithRawResponse",
+ "RunsResourceWithStreamingResponse",
+ "AsyncRunsResourceWithStreamingResponse",
+ "BenchmarksResource",
+ "AsyncBenchmarksResource",
+ "BenchmarksResourceWithRawResponse",
+ "AsyncBenchmarksResourceWithRawResponse",
+ "BenchmarksResourceWithStreamingResponse",
+ "AsyncBenchmarksResourceWithStreamingResponse",
+]
diff --git a/src/runloop_api_client/resources/benchmarks/benchmarks.py b/src/runloop_api_client/resources/benchmarks/benchmarks.py
new file mode 100644
index 000000000..9b016c684
--- /dev/null
+++ b/src/runloop_api_client/resources/benchmarks/benchmarks.py
@@ -0,0 +1,532 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+import httpx
+
+from .runs import (
+ RunsResource,
+ AsyncRunsResource,
+ RunsResourceWithRawResponse,
+ AsyncRunsResourceWithRawResponse,
+ RunsResourceWithStreamingResponse,
+ AsyncRunsResourceWithStreamingResponse,
+)
+from ...types import benchmark_list_params, benchmark_create_params, benchmark_start_run_params
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.benchmark_view import BenchmarkView
+from ...types.benchmark_run_view import BenchmarkRunView
+from ...types.benchmark_list_view import BenchmarkListView
+
+__all__ = ["BenchmarksResource", "AsyncBenchmarksResource"]
+
+
+class BenchmarksResource(SyncAPIResource):
+ @cached_property
+ def runs(self) -> RunsResource:
+ return RunsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> BenchmarksResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
+ """
+ return BenchmarksResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> BenchmarksResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
+ """
+ return BenchmarksResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ scenario_ids: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> BenchmarkView:
+ """
+ Create a Benchmark with a set of Scenarios.
+
+ Args:
+ name: The name of the Benchmark.
+
+ scenario_ids: The Scenario IDs that make up the Benchmark.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return self._post(
+ "/v1/benchmarks",
+ body=maybe_transform(
+ {
+ "name": name,
+ "scenario_ids": scenario_ids,
+ },
+ benchmark_create_params.BenchmarkCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=BenchmarkView,
+ )
+
+ def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkView:
+ """
+ Get a previously created Benchmark.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._get(
+ f"/v1/benchmarks/{id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BenchmarkView,
+ )
+
+ def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ starting_after: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkListView:
+ """
+ List all Benchmarks matching filter.
+
+ Args:
+ limit: The limit of items to return. Default is 20.
+
+ starting_after: Load the next page of data starting after the item with the given ID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v1/benchmarks",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "limit": limit,
+ "starting_after": starting_after,
+ },
+ benchmark_list_params.BenchmarkListParams,
+ ),
+ ),
+ cast_to=BenchmarkListView,
+ )
+
+ def start_run(
+ self,
+ *,
+ benchmark_id: str,
+ run_name: Optional[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> BenchmarkRunView:
+ """
+ Start a new BenchmarkRun based on the provided Benchmark.
+
+ Args:
+ benchmark_id: ID of the Benchmark to run.
+
+ run_name: Display name of the run.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return self._post(
+ "/v1/benchmarks/start_run",
+ body=maybe_transform(
+ {
+ "benchmark_id": benchmark_id,
+ "run_name": run_name,
+ },
+ benchmark_start_run_params.BenchmarkStartRunParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=BenchmarkRunView,
+ )
+
+
+class AsyncBenchmarksResource(AsyncAPIResource):
+ @cached_property
+ def runs(self) -> AsyncRunsResource:
+ return AsyncRunsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncBenchmarksResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncBenchmarksResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncBenchmarksResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
+ """
+ return AsyncBenchmarksResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ scenario_ids: Optional[List[str]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> BenchmarkView:
+ """
+ Create a Benchmark with a set of Scenarios.
+
+ Args:
+ name: The name of the Benchmark.
+
+ scenario_ids: The Scenario IDs that make up the Benchmark.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return await self._post(
+ "/v1/benchmarks",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "scenario_ids": scenario_ids,
+ },
+ benchmark_create_params.BenchmarkCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=BenchmarkView,
+ )
+
+ async def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkView:
+ """
+ Get a previously created Benchmark.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._get(
+ f"/v1/benchmarks/{id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BenchmarkView,
+ )
+
+ async def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ starting_after: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkListView:
+ """
+ List all Benchmarks matching filter.
+
+ Args:
+ limit: The limit of items to return. Default is 20.
+
+ starting_after: Load the next page of data starting after the item with the given ID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v1/benchmarks",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "limit": limit,
+ "starting_after": starting_after,
+ },
+ benchmark_list_params.BenchmarkListParams,
+ ),
+ ),
+ cast_to=BenchmarkListView,
+ )
+
+ async def start_run(
+ self,
+ *,
+ benchmark_id: str,
+ run_name: Optional[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> BenchmarkRunView:
+ """
+ Start a new BenchmarkRun based on the provided Benchmark.
+
+ Args:
+ benchmark_id: ID of the Benchmark to run.
+
+ run_name: Display name of the run.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ return await self._post(
+ "/v1/benchmarks/start_run",
+ body=await async_maybe_transform(
+ {
+ "benchmark_id": benchmark_id,
+ "run_name": run_name,
+ },
+ benchmark_start_run_params.BenchmarkStartRunParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=BenchmarkRunView,
+ )
+
+
+class BenchmarksResourceWithRawResponse:
+ def __init__(self, benchmarks: BenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.create = to_raw_response_wrapper(
+ benchmarks.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ benchmarks.list,
+ )
+ self.start_run = to_raw_response_wrapper(
+ benchmarks.start_run,
+ )
+
+ @cached_property
+ def runs(self) -> RunsResourceWithRawResponse:
+ return RunsResourceWithRawResponse(self._benchmarks.runs)
+
+
+class AsyncBenchmarksResourceWithRawResponse:
+ def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.create = async_to_raw_response_wrapper(
+ benchmarks.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ benchmarks.list,
+ )
+ self.start_run = async_to_raw_response_wrapper(
+ benchmarks.start_run,
+ )
+
+ @cached_property
+ def runs(self) -> AsyncRunsResourceWithRawResponse:
+ return AsyncRunsResourceWithRawResponse(self._benchmarks.runs)
+
+
+class BenchmarksResourceWithStreamingResponse:
+ def __init__(self, benchmarks: BenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.create = to_streamed_response_wrapper(
+ benchmarks.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ benchmarks.list,
+ )
+ self.start_run = to_streamed_response_wrapper(
+ benchmarks.start_run,
+ )
+
+ @cached_property
+ def runs(self) -> RunsResourceWithStreamingResponse:
+ return RunsResourceWithStreamingResponse(self._benchmarks.runs)
+
+
+class AsyncBenchmarksResourceWithStreamingResponse:
+ def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
+ self._benchmarks = benchmarks
+
+ self.create = async_to_streamed_response_wrapper(
+ benchmarks.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ benchmarks.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ benchmarks.list,
+ )
+ self.start_run = async_to_streamed_response_wrapper(
+ benchmarks.start_run,
+ )
+
+ @cached_property
+ def runs(self) -> AsyncRunsResourceWithStreamingResponse:
+ return AsyncRunsResourceWithStreamingResponse(self._benchmarks.runs)
diff --git a/src/runloop_api_client/resources/benchmarks/runs.py b/src/runloop_api_client/resources/benchmarks/runs.py
new file mode 100644
index 000000000..3a5527345
--- /dev/null
+++ b/src/runloop_api_client/resources/benchmarks/runs.py
@@ -0,0 +1,365 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.benchmarks import run_list_params
+from ...types.benchmark_run_view import BenchmarkRunView
+from ...types.benchmark_run_list_view import BenchmarkRunListView
+
+__all__ = ["RunsResource", "AsyncRunsResource"]
+
+
+class RunsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RunsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
+ """
+ return RunsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RunsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
+ """
+ return RunsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkRunView:
+ """
+ Get a BenchmarkRun given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._get(
+ f"/v1/benchmarks/runs/{id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BenchmarkRunView,
+ )
+
+ def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ starting_after: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkRunListView:
+ """
+ List all BenchmarkRuns matching filter.
+
+ Args:
+ limit: The limit of items to return. Default is 20.
+
+ starting_after: Load the next page of data starting after the item with the given ID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v1/benchmarks/runs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "limit": limit,
+ "starting_after": starting_after,
+ },
+ run_list_params.RunListParams,
+ ),
+ ),
+ cast_to=BenchmarkRunListView,
+ )
+
+ def complete(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> BenchmarkRunView:
+ """
+ Complete a currently running BenchmarkRun.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._post(
+ f"/v1/benchmarks/runs/{id}/complete",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=BenchmarkRunView,
+ )
+
+
+class AsyncRunsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRunsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRunsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
+ """
+ return AsyncRunsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkRunView:
+ """
+ Get a BenchmarkRun given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._get(
+ f"/v1/benchmarks/runs/{id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BenchmarkRunView,
+ )
+
+ async def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ starting_after: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BenchmarkRunListView:
+ """
+ List all BenchmarkRuns matching filter.
+
+ Args:
+ limit: The limit of items to return. Default is 20.
+
+ starting_after: Load the next page of data starting after the item with the given ID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v1/benchmarks/runs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "limit": limit,
+ "starting_after": starting_after,
+ },
+ run_list_params.RunListParams,
+ ),
+ ),
+ cast_to=BenchmarkRunListView,
+ )
+
+ async def complete(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> BenchmarkRunView:
+ """
+ Complete a currently running BenchmarkRun.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._post(
+ f"/v1/benchmarks/runs/{id}/complete",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=BenchmarkRunView,
+ )
+
+
+class RunsResourceWithRawResponse:
+ def __init__(self, runs: RunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = to_raw_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ runs.list,
+ )
+ self.complete = to_raw_response_wrapper(
+ runs.complete,
+ )
+
+
+class AsyncRunsResourceWithRawResponse:
+ def __init__(self, runs: AsyncRunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = async_to_raw_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ runs.list,
+ )
+ self.complete = async_to_raw_response_wrapper(
+ runs.complete,
+ )
+
+
+class RunsResourceWithStreamingResponse:
+ def __init__(self, runs: RunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = to_streamed_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ runs.list,
+ )
+ self.complete = to_streamed_response_wrapper(
+ runs.complete,
+ )
+
+
+class AsyncRunsResourceWithStreamingResponse:
+ def __init__(self, runs: AsyncRunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ runs.list,
+ )
+ self.complete = async_to_streamed_response_wrapper(
+ runs.complete,
+ )
diff --git a/src/runloop_api_client/resources/blueprints.py b/src/runloop_api_client/resources/blueprints.py
index 78dae9d88..124f0a139 100644
--- a/src/runloop_api_client/resources/blueprints.py
+++ b/src/runloop_api_client/resources/blueprints.py
@@ -35,7 +35,7 @@ class BlueprintsResource(SyncAPIResource):
@cached_property
def with_raw_response(self) -> BlueprintsResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
@@ -311,7 +311,7 @@ class AsyncBlueprintsResource(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncBlueprintsResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
diff --git a/src/runloop_api_client/resources/devboxes/devboxes.py b/src/runloop_api_client/resources/devboxes/devboxes.py
index 8de439e48..3d27b1458 100644
--- a/src/runloop_api_client/resources/devboxes/devboxes.py
+++ b/src/runloop_api_client/resources/devboxes/devboxes.py
@@ -107,7 +107,7 @@ def executions(self) -> ExecutionsResource:
@cached_property
def with_raw_response(self) -> DevboxesResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
@@ -1230,7 +1230,7 @@ def executions(self) -> AsyncExecutionsResource:
@cached_property
def with_raw_response(self) -> AsyncDevboxesResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
diff --git a/src/runloop_api_client/resources/devboxes/executions.py b/src/runloop_api_client/resources/devboxes/executions.py
index 7184242ec..9cbe048de 100755
--- a/src/runloop_api_client/resources/devboxes/executions.py
+++ b/src/runloop_api_client/resources/devboxes/executions.py
@@ -33,7 +33,7 @@ class ExecutionsResource(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ExecutionsResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
@@ -306,7 +306,7 @@ class AsyncExecutionsResource(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncExecutionsResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
diff --git a/src/runloop_api_client/resources/devboxes/logs.py b/src/runloop_api_client/resources/devboxes/logs.py
index a9fa13101..05b80f362 100644
--- a/src/runloop_api_client/resources/devboxes/logs.py
+++ b/src/runloop_api_client/resources/devboxes/logs.py
@@ -28,7 +28,7 @@ class LogsResource(SyncAPIResource):
@cached_property
def with_raw_response(self) -> LogsResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
@@ -98,7 +98,7 @@ class AsyncLogsResource(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncLogsResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
diff --git a/src/runloop_api_client/resources/devboxes/lsp.py b/src/runloop_api_client/resources/devboxes/lsp.py
index 86835a9b0..76cdccc74 100644
--- a/src/runloop_api_client/resources/devboxes/lsp.py
+++ b/src/runloop_api_client/resources/devboxes/lsp.py
@@ -59,7 +59,7 @@ class LspResource(SyncAPIResource):
@cached_property
def with_raw_response(self) -> LspResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
@@ -750,7 +750,7 @@ class AsyncLspResource(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncLspResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
diff --git a/src/runloop_api_client/resources/repositories.py b/src/runloop_api_client/resources/repositories.py
index c65b4133b..115ed94a4 100644
--- a/src/runloop_api_client/resources/repositories.py
+++ b/src/runloop_api_client/resources/repositories.py
@@ -30,7 +30,7 @@ class RepositoriesResource(SyncAPIResource):
@cached_property
def with_raw_response(self) -> RepositoriesResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
@@ -258,7 +258,7 @@ class AsyncRepositoriesResource(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncRepositoriesResourceWithRawResponse:
"""
- This property can be used as a prefix for any HTTP method call to return the
+ This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
diff --git a/src/runloop_api_client/resources/scenarios/__init__.py b/src/runloop_api_client/resources/scenarios/__init__.py
new file mode 100644
index 000000000..88b900e36
--- /dev/null
+++ b/src/runloop_api_client/resources/scenarios/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .runs import (
+ RunsResource,
+ AsyncRunsResource,
+ RunsResourceWithRawResponse,
+ AsyncRunsResourceWithRawResponse,
+ RunsResourceWithStreamingResponse,
+ AsyncRunsResourceWithStreamingResponse,
+)
+from .scenarios import (
+ ScenariosResource,
+ AsyncScenariosResource,
+ ScenariosResourceWithRawResponse,
+ AsyncScenariosResourceWithRawResponse,
+ ScenariosResourceWithStreamingResponse,
+ AsyncScenariosResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "RunsResource",
+ "AsyncRunsResource",
+ "RunsResourceWithRawResponse",
+ "AsyncRunsResourceWithRawResponse",
+ "RunsResourceWithStreamingResponse",
+ "AsyncRunsResourceWithStreamingResponse",
+ "ScenariosResource",
+ "AsyncScenariosResource",
+ "ScenariosResourceWithRawResponse",
+ "AsyncScenariosResourceWithRawResponse",
+ "ScenariosResourceWithStreamingResponse",
+ "AsyncScenariosResourceWithStreamingResponse",
+]
diff --git a/src/runloop_api_client/resources/scenarios/runs.py b/src/runloop_api_client/resources/scenarios/runs.py
new file mode 100644
index 000000000..cec293027
--- /dev/null
+++ b/src/runloop_api_client/resources/scenarios/runs.py
@@ -0,0 +1,457 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.scenarios import run_list_params
+from ...types.scenario_run_view import ScenarioRunView
+from ...types.scenario_run_list_view import ScenarioRunListView
+
+__all__ = ["RunsResource", "AsyncRunsResource"]
+
+
+class RunsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RunsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
+ """
+ return RunsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RunsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
+ """
+ return RunsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ScenarioRunView:
+ """
+ Get a ScenarioRun given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._get(
+ f"/v1/scenarios/runs/{id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScenarioRunView,
+ )
+
+ def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ starting_after: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ScenarioRunListView:
+ """
+ List all ScenarioRuns matching filter.
+
+ Args:
+ limit: The limit of items to return. Default is 20.
+
+ starting_after: Load the next page of data starting after the item with the given ID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v1/scenarios/runs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "limit": limit,
+ "starting_after": starting_after,
+ },
+ run_list_params.RunListParams,
+ ),
+ ),
+ cast_to=ScenarioRunListView,
+ )
+
+ def complete(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> ScenarioRunView:
+ """
+ Complete a currently running ScenarioRun.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._post(
+ f"/v1/scenarios/runs/{id}/complete",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=ScenarioRunView,
+ )
+
+ def score(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> ScenarioRunView:
+ """
+ Score a currently running ScenarioRun.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._post(
+ f"/v1/scenarios/runs/{id}/score",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=ScenarioRunView,
+ )
+
+
+class AsyncRunsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRunsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRunsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRunsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
+ """
+ return AsyncRunsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ScenarioRunView:
+ """
+ Get a ScenarioRun given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._get(
+ f"/v1/scenarios/runs/{id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScenarioRunView,
+ )
+
+ async def list(
+ self,
+ *,
+ limit: int | NotGiven = NOT_GIVEN,
+ starting_after: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ScenarioRunListView:
+ """
+ List all ScenarioRuns matching filter.
+
+ Args:
+ limit: The limit of items to return. Default is 20.
+
+ starting_after: Load the next page of data starting after the item with the given ID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v1/scenarios/runs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "limit": limit,
+ "starting_after": starting_after,
+ },
+ run_list_params.RunListParams,
+ ),
+ ),
+ cast_to=ScenarioRunListView,
+ )
+
+ async def complete(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> ScenarioRunView:
+ """
+ Complete a currently running ScenarioRun.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._post(
+ f"/v1/scenarios/runs/{id}/complete",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=ScenarioRunView,
+ )
+
+ async def score(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ idempotency_key: str | None = None,
+ ) -> ScenarioRunView:
+ """
+ Score a currently running ScenarioRun.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+
+ idempotency_key: Specify a custom idempotency key for this request
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._post(
+ f"/v1/scenarios/runs/{id}/score",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ idempotency_key=idempotency_key,
+ ),
+ cast_to=ScenarioRunView,
+ )
+
+
+class RunsResourceWithRawResponse:
+ def __init__(self, runs: RunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = to_raw_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ runs.list,
+ )
+ self.complete = to_raw_response_wrapper(
+ runs.complete,
+ )
+ self.score = to_raw_response_wrapper(
+ runs.score,
+ )
+
+
+class AsyncRunsResourceWithRawResponse:
+ def __init__(self, runs: AsyncRunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = async_to_raw_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ runs.list,
+ )
+ self.complete = async_to_raw_response_wrapper(
+ runs.complete,
+ )
+ self.score = async_to_raw_response_wrapper(
+ runs.score,
+ )
+
+
+class RunsResourceWithStreamingResponse:
+ def __init__(self, runs: RunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = to_streamed_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ runs.list,
+ )
+ self.complete = to_streamed_response_wrapper(
+ runs.complete,
+ )
+ self.score = to_streamed_response_wrapper(
+ runs.score,
+ )
+
+
+class AsyncRunsResourceWithStreamingResponse:
+ def __init__(self, runs: AsyncRunsResource) -> None:
+ self._runs = runs
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ runs.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ runs.list,
+ )
+ self.complete = async_to_streamed_response_wrapper(
+ runs.complete,
+ )
+ self.score = async_to_streamed_response_wrapper(
+ runs.score,
+ )
diff --git a/src/runloop_api_client/resources/scenarios/scenarios.py b/src/runloop_api_client/resources/scenarios/scenarios.py
new file mode 100644
index 000000000..ec2a73dcc
--- /dev/null
+++ b/src/runloop_api_client/resources/scenarios/scenarios.py
@@ -0,0 +1,567 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from .runs import (
+ RunsResource,
+ AsyncRunsResource,
+ RunsResourceWithRawResponse,
+ AsyncRunsResourceWithRawResponse,
+ RunsResourceWithStreamingResponse,
+ AsyncRunsResourceWithStreamingResponse,
+)
+from ...types import (
+ scenario_list_params,
+ scenario_create_params,
+ scenario_start_run_params,
+)
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.scenario_view import ScenarioView
+from ...types.scenario_run_view import ScenarioRunView
+from ...types.scenario_list_view import ScenarioListView
+from ...types.input_context_parameters_param import InputContextParametersParam
+from ...types.scoring_contract_parameters_param import ScoringContractParametersParam
+from ...types.scenario_environment_parameters_param import ScenarioEnvironmentParametersParam
+
+__all__ = ["ScenariosResource", "AsyncScenariosResource"]
+
+
class ScenariosResource(SyncAPIResource):
    """Synchronous client for the `/v1/scenarios` API surface."""

    @cached_property
    def runs(self) -> RunsResource:
        # Sub-resource for scenario runs (`/v1/scenarios/runs/...`).
        return RunsResource(self._client)

    @cached_property
    def with_raw_response(self) -> ScenariosResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
        """
        return ScenariosResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ScenariosResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
        """
        return ScenariosResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        input_context: InputContextParametersParam,
        name: str,
        scoring_contract: ScoringContractParametersParam,
        environment_parameters: Optional[ScenarioEnvironmentParametersParam] | NotGiven = NOT_GIVEN,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        idempotency_key: str | None = None,
    ) -> ScenarioView:
        """Create a Scenario, a repeatable AI coding evaluation test that defines
        the starting environment as well as evaluation success criteria.

        Args:
          input_context: The input context for the Scenario.
          name: Name of the scenario.
          scoring_contract: The scoring contract for the Scenario.
          environment_parameters: The Environment in which the Scenario will run.
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
          idempotency_key: Specify a custom idempotency key for this request.
        """
        payload = maybe_transform(
            {
                "input_context": input_context,
                "name": name,
                "scoring_contract": scoring_contract,
                "environment_parameters": environment_parameters,
            },
            scenario_create_params.ScenarioCreateParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            idempotency_key=idempotency_key,
        )
        return self._post("/v1/scenarios", body=payload, options=opts, cast_to=ScenarioView)

    def retrieve(
        self,
        id: str,
        *,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ScenarioView:
        """Get a previously created scenario.

        Args:
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
        """
        if not id:
            raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(f"/v1/scenarios/{id}", options=opts, cast_to=ScenarioView)

    def list(
        self,
        *,
        limit: int | NotGiven = NOT_GIVEN,
        starting_after: str | NotGiven = NOT_GIVEN,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ScenarioListView:
        """List all Scenarios matching filter.

        Args:
          limit: The limit of items to return. Default is 20.
          starting_after: Load the next page of data starting after the item with the given ID.
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
        """
        page_query = maybe_transform(
            {
                "limit": limit,
                "starting_after": starting_after,
            },
            scenario_list_params.ScenarioListParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=page_query,
        )
        return self._get("/v1/scenarios", options=opts, cast_to=ScenarioListView)

    def start_run(
        self,
        *,
        scenario_id: str,
        benchmark_run_id: Optional[str] | NotGiven = NOT_GIVEN,
        run_name: Optional[str] | NotGiven = NOT_GIVEN,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        idempotency_key: str | None = None,
    ) -> ScenarioRunView:
        """Start a new ScenarioRun based on the provided Scenario.

        Args:
          scenario_id: ID of the Scenario to run.
          benchmark_run_id: Benchmark to associate the run.
          run_name: Display name of the run.
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
          idempotency_key: Specify a custom idempotency key for this request.
        """
        payload = maybe_transform(
            {
                "scenario_id": scenario_id,
                "benchmark_run_id": benchmark_run_id,
                "run_name": run_name,
            },
            scenario_start_run_params.ScenarioStartRunParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            idempotency_key=idempotency_key,
        )
        return self._post("/v1/scenarios/start_run", body=payload, options=opts, cast_to=ScenarioRunView)
+
+
class AsyncScenariosResource(AsyncAPIResource):
    """Asynchronous client for the `/v1/scenarios` API surface."""

    @cached_property
    def runs(self) -> AsyncRunsResource:
        # Sub-resource for scenario runs (`/v1/scenarios/runs/...`).
        return AsyncRunsResource(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncScenariosResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/runloopai/api-client-python#accessing-raw-response-data-eg-headers
        """
        return AsyncScenariosResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncScenariosResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/runloopai/api-client-python#with_streaming_response
        """
        return AsyncScenariosResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        input_context: InputContextParametersParam,
        name: str,
        scoring_contract: ScoringContractParametersParam,
        environment_parameters: Optional[ScenarioEnvironmentParametersParam] | NotGiven = NOT_GIVEN,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        idempotency_key: str | None = None,
    ) -> ScenarioView:
        """Create a Scenario, a repeatable AI coding evaluation test that defines
        the starting environment as well as evaluation success criteria.

        Args:
          input_context: The input context for the Scenario.
          name: Name of the scenario.
          scoring_contract: The scoring contract for the Scenario.
          environment_parameters: The Environment in which the Scenario will run.
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
          idempotency_key: Specify a custom idempotency key for this request.
        """
        payload = await async_maybe_transform(
            {
                "input_context": input_context,
                "name": name,
                "scoring_contract": scoring_contract,
                "environment_parameters": environment_parameters,
            },
            scenario_create_params.ScenarioCreateParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            idempotency_key=idempotency_key,
        )
        return await self._post("/v1/scenarios", body=payload, options=opts, cast_to=ScenarioView)

    async def retrieve(
        self,
        id: str,
        *,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ScenarioView:
        """Get a previously created scenario.

        Args:
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
        """
        if not id:
            raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
        opts = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(f"/v1/scenarios/{id}", options=opts, cast_to=ScenarioView)

    async def list(
        self,
        *,
        limit: int | NotGiven = NOT_GIVEN,
        starting_after: str | NotGiven = NOT_GIVEN,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ScenarioListView:
        """List all Scenarios matching filter.

        Args:
          limit: The limit of items to return. Default is 20.
          starting_after: Load the next page of data starting after the item with the given ID.
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
        """
        page_query = await async_maybe_transform(
            {
                "limit": limit,
                "starting_after": starting_after,
            },
            scenario_list_params.ScenarioListParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=page_query,
        )
        return await self._get("/v1/scenarios", options=opts, cast_to=ScenarioListView)

    async def start_run(
        self,
        *,
        scenario_id: str,
        benchmark_run_id: Optional[str] | NotGiven = NOT_GIVEN,
        run_name: Optional[str] | NotGiven = NOT_GIVEN,
        # The extra_* arguments pass request parameters not exposed above; they
        # take precedence over values defined on the client or this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
        idempotency_key: str | None = None,
    ) -> ScenarioRunView:
        """Start a new ScenarioRun based on the provided Scenario.

        Args:
          scenario_id: ID of the Scenario to run.
          benchmark_run_id: Benchmark to associate the run.
          run_name: Display name of the run.
          extra_headers: Send extra headers.
          extra_query: Add additional query parameters to the request.
          extra_body: Add additional JSON properties to the request.
          timeout: Override the client-level default timeout for this request, in seconds.
          idempotency_key: Specify a custom idempotency key for this request.
        """
        payload = await async_maybe_transform(
            {
                "scenario_id": scenario_id,
                "benchmark_run_id": benchmark_run_id,
                "run_name": run_name,
            },
            scenario_start_run_params.ScenarioStartRunParams,
        )
        opts = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            idempotency_key=idempotency_key,
        )
        return await self._post("/v1/scenarios/start_run", body=payload, options=opts, cast_to=ScenarioRunView)
+
+
class ScenariosResourceWithRawResponse:
    """View over a ScenariosResource where each call returns the raw HTTP response."""

    def __init__(self, scenarios: ScenariosResource) -> None:
        self._scenarios = scenarios

        # Wrap every public method so invoking it yields the raw response.
        for _method in ("create", "retrieve", "list", "start_run"):
            setattr(self, _method, to_raw_response_wrapper(getattr(scenarios, _method)))

    @cached_property
    def runs(self) -> RunsResourceWithRawResponse:
        # Expose the runs sub-resource under the same raw-response view.
        return RunsResourceWithRawResponse(self._scenarios.runs)
+
+
class AsyncScenariosResourceWithRawResponse:
    """View over an AsyncScenariosResource where each call returns the raw HTTP response."""

    def __init__(self, scenarios: AsyncScenariosResource) -> None:
        self._scenarios = scenarios

        # Wrap every public method with the async raw-response wrapper.
        for _method in ("create", "retrieve", "list", "start_run"):
            setattr(self, _method, async_to_raw_response_wrapper(getattr(scenarios, _method)))

    @cached_property
    def runs(self) -> AsyncRunsResourceWithRawResponse:
        # Expose the runs sub-resource under the same raw-response view.
        return AsyncRunsResourceWithRawResponse(self._scenarios.runs)
+
+
class ScenariosResourceWithStreamingResponse:
    """View over a ScenariosResource where each call returns a streamed response."""

    def __init__(self, scenarios: ScenariosResource) -> None:
        self._scenarios = scenarios

        # Wrap every public method so the response body is not read eagerly.
        for _method in ("create", "retrieve", "list", "start_run"):
            setattr(self, _method, to_streamed_response_wrapper(getattr(scenarios, _method)))

    @cached_property
    def runs(self) -> RunsResourceWithStreamingResponse:
        # Expose the runs sub-resource under the same streaming view.
        return RunsResourceWithStreamingResponse(self._scenarios.runs)
+
+
class AsyncScenariosResourceWithStreamingResponse:
    """View over an AsyncScenariosResource where each call returns a streamed response."""

    def __init__(self, scenarios: AsyncScenariosResource) -> None:
        self._scenarios = scenarios

        # Wrap every public method with the async streamed-response wrapper.
        for _method in ("create", "retrieve", "list", "start_run"):
            setattr(self, _method, async_to_streamed_response_wrapper(getattr(scenarios, _method)))

    @cached_property
    def runs(self) -> AsyncRunsResourceWithStreamingResponse:
        # Expose the runs sub-resource under the same streaming view.
        return AsyncRunsResourceWithStreamingResponse(self._scenarios.runs)
diff --git a/src/runloop_api_client/types/__init__.py b/src/runloop_api_client/types/__init__.py
index d5c58545b..58f1cf7af 100644
--- a/src/runloop_api_client/types/__init__.py
+++ b/src/runloop_api_client/types/__init__.py
@@ -8,22 +8,37 @@
CodeMountParameters as CodeMountParameters,
)
from .devbox_view import DevboxView as DevboxView
+from .scenario_view import ScenarioView as ScenarioView
+from .benchmark_view import BenchmarkView as BenchmarkView
from .blueprint_view import BlueprintView as BlueprintView
from .devbox_list_view import DevboxListView as DevboxListView
+from .scenario_run_view import ScenarioRunView as ScenarioRunView
+from .benchmark_run_view import BenchmarkRunView as BenchmarkRunView
from .devbox_list_params import DevboxListParams as DevboxListParams
from .devbox_tunnel_view import DevboxTunnelView as DevboxTunnelView
+from .scenario_list_view import ScenarioListView as ScenarioListView
+from .benchmark_list_view import BenchmarkListView as BenchmarkListView
from .blueprint_build_log import BlueprintBuildLog as BlueprintBuildLog
from .blueprint_list_view import BlueprintListView as BlueprintListView
from .devbox_create_params import DevboxCreateParams as DevboxCreateParams
from .devbox_snapshot_view import DevboxSnapshotView as DevboxSnapshotView
+from .scenario_list_params import ScenarioListParams as ScenarioListParams
+from .benchmark_list_params import BenchmarkListParams as BenchmarkListParams
from .blueprint_list_params import BlueprintListParams as BlueprintListParams
from .blueprint_preview_view import BlueprintPreviewView as BlueprintPreviewView
from .repository_list_params import RepositoryListParams as RepositoryListParams
+from .scenario_create_params import ScenarioCreateParams as ScenarioCreateParams
+from .scenario_run_list_view import ScenarioRunListView as ScenarioRunListView
+from .benchmark_create_params import BenchmarkCreateParams as BenchmarkCreateParams
+from .benchmark_run_list_view import BenchmarkRunListView as BenchmarkRunListView
from .blueprint_create_params import BlueprintCreateParams as BlueprintCreateParams
from .blueprint_preview_params import BlueprintPreviewParams as BlueprintPreviewParams
+from .input_context_parameters import InputContextParameters as InputContextParameters
from .repository_create_params import RepositoryCreateParams as RepositoryCreateParams
from .devbox_snapshot_list_view import DevboxSnapshotListView as DevboxSnapshotListView
from .devbox_upload_file_params import DevboxUploadFileParams as DevboxUploadFileParams
+from .scenario_start_run_params import ScenarioStartRunParams as ScenarioStartRunParams
+from .benchmark_start_run_params import BenchmarkStartRunParams as BenchmarkStartRunParams
from .blueprint_build_parameters import BlueprintBuildParameters as BlueprintBuildParameters
from .devbox_execute_sync_params import DevboxExecuteSyncParams as DevboxExecuteSyncParams
from .repository_connection_view import RepositoryConnectionView as RepositoryConnectionView
@@ -33,13 +48,24 @@
from .devbox_execute_async_params import DevboxExecuteAsyncParams as DevboxExecuteAsyncParams
from .devbox_remove_tunnel_params import DevboxRemoveTunnelParams as DevboxRemoveTunnelParams
from .devbox_snapshot_disk_params import DevboxSnapshotDiskParams as DevboxSnapshotDiskParams
+from .scoring_contract_parameters import ScoringContractParameters as ScoringContractParameters
+from .scoring_function_parameters import ScoringFunctionParameters as ScoringFunctionParameters
from .devbox_execution_detail_view import DevboxExecutionDetailView as DevboxExecutionDetailView
from .repository_version_list_view import RepositoryVersionListView as RepositoryVersionListView
+from .scoring_contract_result_view import ScoringContractResultView as ScoringContractResultView
+from .scoring_function_result_view import ScoringFunctionResultView as ScoringFunctionResultView
from .blueprint_build_logs_list_view import BlueprintBuildLogsListView as BlueprintBuildLogsListView
from .devbox_create_ssh_key_response import DevboxCreateSSHKeyResponse as DevboxCreateSSHKeyResponse
+from .input_context_parameters_param import InputContextParametersParam as InputContextParametersParam
from .repository_connection_list_view import RepositoryConnectionListView as RepositoryConnectionListView
+from .scenario_environment_parameters import ScenarioEnvironmentParameters as ScenarioEnvironmentParameters
from .devbox_read_file_contents_params import DevboxReadFileContentsParams as DevboxReadFileContentsParams
from .devbox_list_disk_snapshots_params import DevboxListDiskSnapshotsParams as DevboxListDiskSnapshotsParams
from .devbox_write_file_contents_params import DevboxWriteFileContentsParams as DevboxWriteFileContentsParams
+from .scoring_contract_parameters_param import ScoringContractParametersParam as ScoringContractParametersParam
+from .scoring_function_parameters_param import ScoringFunctionParametersParam as ScoringFunctionParametersParam
from .devbox_async_execution_detail_view import DevboxAsyncExecutionDetailView as DevboxAsyncExecutionDetailView
from .devbox_read_file_contents_response import DevboxReadFileContentsResponse as DevboxReadFileContentsResponse
+from .scenario_environment_parameters_param import (
+ ScenarioEnvironmentParametersParam as ScenarioEnvironmentParametersParam,
+)
diff --git a/src/runloop_api_client/types/benchmark_create_params.py b/src/runloop_api_client/types/benchmark_create_params.py
new file mode 100644
index 000000000..3fea3d2a0
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_create_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["BenchmarkCreateParams"]
+
+
class BenchmarkCreateParams(TypedDict, total=False):
    """Request parameters for creating a Benchmark."""

    name: Required[str]
    """The name of the Benchmark."""

    scenario_ids: Optional[List[str]]
    """The Scenario IDs that make up the Benchmark."""
diff --git a/src/runloop_api_client/types/benchmark_list_params.py b/src/runloop_api_client/types/benchmark_list_params.py
new file mode 100644
index 000000000..51b2b1320
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BenchmarkListParams"]
+
+
class BenchmarkListParams(TypedDict, total=False):
    """Query parameters for listing Benchmarks (cursor-style pagination)."""

    limit: int
    """The limit of items to return. Default is 20."""

    starting_after: str
    """Load the next page of data starting after the item with the given ID."""
diff --git a/src/runloop_api_client/types/benchmark_list_view.py b/src/runloop_api_client/types/benchmark_list_view.py
new file mode 100644
index 000000000..40ebd65dd
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_list_view.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .benchmark_view import BenchmarkView
+
+__all__ = ["BenchmarkListView"]
+
+
class BenchmarkListView(BaseModel):
    """A page of Benchmarks returned by the list endpoint."""

    benchmarks: List[BenchmarkView]
    """List of Benchmarks matching filter."""

    has_more: bool

    total_count: int
diff --git a/src/runloop_api_client/types/benchmark_run_list_view.py b/src/runloop_api_client/types/benchmark_run_list_view.py
new file mode 100644
index 000000000..4a8a1fda0
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_run_list_view.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .benchmark_run_view import BenchmarkRunView
+
+__all__ = ["BenchmarkRunListView"]
+
+
class BenchmarkRunListView(BaseModel):
    """A page of BenchmarkRuns returned by the list endpoint."""

    has_more: bool

    runs: List[BenchmarkRunView]
    """List of BenchmarkRuns matching filter."""

    total_count: int
diff --git a/src/runloop_api_client/types/benchmark_run_view.py b/src/runloop_api_client/types/benchmark_run_view.py
new file mode 100644
index 000000000..08507340e
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_run_view.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["BenchmarkRunView"]
+
+
class BenchmarkRunView(BaseModel):
    """A single run of a Benchmark."""

    id: str
    """The ID of the BenchmarkRun."""

    benchmark_id: str
    """The ID of the Benchmark."""

    state: Literal["running", "completed"]
    """The state of the BenchmarkRun."""

    name: Optional[str] = None
    """The name of the BenchmarkRun."""
diff --git a/src/runloop_api_client/types/benchmark_start_run_params.py b/src/runloop_api_client/types/benchmark_start_run_params.py
new file mode 100644
index 000000000..39d618669
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_start_run_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["BenchmarkStartRunParams"]
+
+
class BenchmarkStartRunParams(TypedDict, total=False):
    """Request parameters for starting a BenchmarkRun."""

    benchmark_id: Required[str]
    """ID of the Benchmark to run."""

    run_name: Optional[str]
    """Display name of the run."""
diff --git a/src/runloop_api_client/types/benchmark_view.py b/src/runloop_api_client/types/benchmark_view.py
new file mode 100644
index 000000000..071415151
--- /dev/null
+++ b/src/runloop_api_client/types/benchmark_view.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["BenchmarkView"]
+
+
class BenchmarkView(BaseModel):
    """A Benchmark: a named collection of Scenarios."""

    id: str
    """The ID of the Benchmark."""

    name: str
    """The name of the Benchmark."""

    # Serialized on the wire as camelCase "scenarioIds" (see alias).
    scenario_ids: List[str] = FieldInfo(alias="scenarioIds")
    """List of Scenario IDs that make up the benchmark."""
diff --git a/src/runloop_api_client/types/benchmarks/__init__.py b/src/runloop_api_client/types/benchmarks/__init__.py
new file mode 100644
index 000000000..4bc4e1112
--- /dev/null
+++ b/src/runloop_api_client/types/benchmarks/__init__.py
@@ -0,0 +1,5 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .run_list_params import RunListParams as RunListParams
diff --git a/src/runloop_api_client/types/benchmarks/run_list_params.py b/src/runloop_api_client/types/benchmarks/run_list_params.py
new file mode 100644
index 000000000..a8be85a7f
--- /dev/null
+++ b/src/runloop_api_client/types/benchmarks/run_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["RunListParams"]
+
+
class RunListParams(TypedDict, total=False):
    """Query parameters for listing benchmark runs (cursor-style pagination)."""

    limit: int
    """The limit of items to return. Default is 20."""

    starting_after: str
    """Load the next page of data starting after the item with the given ID."""
diff --git a/src/runloop_api_client/types/devboxes/document_symbol.py b/src/runloop_api_client/types/devboxes/document_symbol.py
index 0e58f87ed..06f8db905 100644
--- a/src/runloop_api_client/types/devboxes/document_symbol.py
+++ b/src/runloop_api_client/types/devboxes/document_symbol.py
@@ -39,7 +39,7 @@ class DocumentSymbol(BaseModel):
e.g the name of a function. Must be contained by the `range`.
"""
- children: Optional[List[DocumentSymbol]] = None
+ children: Optional[List["DocumentSymbol"]] = None
"""Children of this symbol, e.g. properties of a class."""
deprecated: Optional[bool] = None
diff --git a/src/runloop_api_client/types/input_context_parameters.py b/src/runloop_api_client/types/input_context_parameters.py
new file mode 100644
index 000000000..85f6f3649
--- /dev/null
+++ b/src/runloop_api_client/types/input_context_parameters.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["InputContextParameters"]
+
+
class InputContextParameters(BaseModel):
    """The input context supplied to a Scenario."""

    problem_statement: str
    """The problem statement for the Scenario."""
diff --git a/src/runloop_api_client/types/input_context_parameters_param.py b/src/runloop_api_client/types/input_context_parameters_param.py
new file mode 100644
index 000000000..20df18dc2
--- /dev/null
+++ b/src/runloop_api_client/types/input_context_parameters_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["InputContextParametersParam"]
+
+
class InputContextParametersParam(TypedDict, total=False):
    """Request-side shape of a Scenario's input context."""

    problem_statement: Required[str]
    """The problem statement for the Scenario."""
diff --git a/src/runloop_api_client/types/scenario_create_params.py b/src/runloop_api_client/types/scenario_create_params.py
new file mode 100644
index 000000000..5d1311da7
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_create_params.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+from .input_context_parameters_param import InputContextParametersParam
+from .scoring_contract_parameters_param import ScoringContractParametersParam
+from .scenario_environment_parameters_param import ScenarioEnvironmentParametersParam
+
+__all__ = ["ScenarioCreateParams"]
+
+
class ScenarioCreateParams(TypedDict, total=False):
    """Request parameters for creating a Scenario."""

    input_context: Required[InputContextParametersParam]
    """The input context for the Scenario."""

    name: Required[str]
    """Name of the scenario."""

    scoring_contract: Required[ScoringContractParametersParam]
    """The scoring contract for the Scenario."""

    environment_parameters: Optional[ScenarioEnvironmentParametersParam]
    """The Environment in which the Scenario will run."""
diff --git a/src/runloop_api_client/types/scenario_environment_parameters.py b/src/runloop_api_client/types/scenario_environment_parameters.py
new file mode 100644
index 000000000..0aede40b8
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_environment_parameters.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["ScenarioEnvironmentParameters"]
+
+
+class ScenarioEnvironmentParameters(BaseModel):
+ blueprint_id: Optional[str] = None
+ """Use the blueprint with matching ID."""
+
+ prebuilt_id: Optional[str] = None
+ """Use the prebuilt with matching ID."""
+
+ snapshot_id: Optional[str] = None
+ """Use the snapshot with matching ID."""
diff --git a/src/runloop_api_client/types/scenario_environment_parameters_param.py b/src/runloop_api_client/types/scenario_environment_parameters_param.py
new file mode 100644
index 000000000..457834cb2
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_environment_parameters_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+__all__ = ["ScenarioEnvironmentParametersParam"]
+
+
+class ScenarioEnvironmentParametersParam(TypedDict, total=False):
+ blueprint_id: Optional[str]
+ """Use the blueprint with matching ID."""
+
+ prebuilt_id: Optional[str]
+ """Use the prebuilt with matching ID."""
+
+ snapshot_id: Optional[str]
+ """Use the snapshot with matching ID."""
diff --git a/src/runloop_api_client/types/scenario_list_params.py b/src/runloop_api_client/types/scenario_list_params.py
new file mode 100644
index 000000000..01dac999b
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ScenarioListParams"]
+
+
+class ScenarioListParams(TypedDict, total=False):
+ limit: int
+ """The limit of items to return. Default is 20."""
+
+ starting_after: str
+ """Load the next page of data starting after the item with the given ID."""
diff --git a/src/runloop_api_client/types/scenario_list_view.py b/src/runloop_api_client/types/scenario_list_view.py
new file mode 100644
index 000000000..9afe534cf
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_list_view.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .scenario_view import ScenarioView
+
+__all__ = ["ScenarioListView"]
+
+
+class ScenarioListView(BaseModel):
+ has_more: bool
+
+ scenarios: List[ScenarioView]
+ """List of Scenarios matching filter."""
+
+ total_count: int
diff --git a/src/runloop_api_client/types/scenario_run_list_view.py b/src/runloop_api_client/types/scenario_run_list_view.py
new file mode 100644
index 000000000..73494f84c
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_run_list_view.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .scenario_run_view import ScenarioRunView
+
+__all__ = ["ScenarioRunListView"]
+
+
+class ScenarioRunListView(BaseModel):
+ has_more: bool
+
+ runs: List[ScenarioRunView]
+ """List of ScenarioRuns matching filter."""
+
+ total_count: int
diff --git a/src/runloop_api_client/types/scenario_run_view.py b/src/runloop_api_client/types/scenario_run_view.py
new file mode 100644
index 000000000..33c18100f
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_run_view.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .scoring_contract_result_view import ScoringContractResultView
+
+__all__ = ["ScenarioRunView"]
+
+
+class ScenarioRunView(BaseModel):
+ id: str
+ """ID of the ScenarioRun."""
+
+ devbox_id: str
+ """ID of the Devbox on which the Scenario is running."""
+
+ scenario_id: str
+ """ID of the Scenario that has been run."""
+
+ state: Literal["running", "scoring", "completed", "canceled", "timeout", "failed"]
+ """The state of the ScenarioRun."""
+
+ benchmark_run_id: Optional[str] = None
+ """ID of the BenchmarkRun that this Scenario is associated with, if any."""
+
+ duration_ms: Optional[int] = None
+ """Duration scenario took to run."""
+
+ scoring_contract_result: Optional[ScoringContractResultView] = None
+ """The input context for the Scenario."""
diff --git a/src/runloop_api_client/types/scenario_start_run_params.py b/src/runloop_api_client/types/scenario_start_run_params.py
new file mode 100644
index 000000000..6704db386
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_start_run_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ScenarioStartRunParams"]
+
+
+class ScenarioStartRunParams(TypedDict, total=False):
+ scenario_id: Required[str]
+ """ID of the Scenario to run."""
+
+ benchmark_run_id: Optional[str]
+ """Benchmark to associate the run."""
+
+ run_name: Optional[str]
+ """Display name of the run."""
diff --git a/src/runloop_api_client/types/scenario_view.py b/src/runloop_api_client/types/scenario_view.py
new file mode 100644
index 000000000..30f075ec2
--- /dev/null
+++ b/src/runloop_api_client/types/scenario_view.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .input_context_parameters import InputContextParameters
+from .scoring_contract_parameters import ScoringContractParameters
+from .scenario_environment_parameters import ScenarioEnvironmentParameters
+
+__all__ = ["ScenarioView"]
+
+
+class ScenarioView(BaseModel):
+ id: str
+ """The ID of the Scenario."""
+
+ input_context: InputContextParameters
+ """The input context for the Scenario."""
+
+ name: str
+ """The name of the Scenario."""
+
+ scoring_contract: ScoringContractParameters
+ """The scoring contract for the Scenario."""
+
+ environment: Optional[ScenarioEnvironmentParameters] = None
+ """The Environment in which the Scenario is run."""
diff --git a/src/runloop_api_client/types/scenarios/__init__.py b/src/runloop_api_client/types/scenarios/__init__.py
new file mode 100644
index 000000000..4bc4e1112
--- /dev/null
+++ b/src/runloop_api_client/types/scenarios/__init__.py
@@ -0,0 +1,5 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .run_list_params import RunListParams as RunListParams
diff --git a/src/runloop_api_client/types/scenarios/run_list_params.py b/src/runloop_api_client/types/scenarios/run_list_params.py
new file mode 100644
index 000000000..a8be85a7f
--- /dev/null
+++ b/src/runloop_api_client/types/scenarios/run_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["RunListParams"]
+
+
+class RunListParams(TypedDict, total=False):
+ limit: int
+ """The limit of items to return. Default is 20."""
+
+ starting_after: str
+ """Load the next page of data starting after the item with the given ID."""
diff --git a/src/runloop_api_client/types/scoring_contract_parameters.py b/src/runloop_api_client/types/scoring_contract_parameters.py
new file mode 100644
index 000000000..5d59d1b64
--- /dev/null
+++ b/src/runloop_api_client/types/scoring_contract_parameters.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .scoring_function_parameters import ScoringFunctionParameters
+
+__all__ = ["ScoringContractParameters"]
+
+
+class ScoringContractParameters(BaseModel):
+ scoring_function_parameters: List[ScoringFunctionParameters]
+ """A list of scoring functions used to evaluate the Scenario."""
diff --git a/src/runloop_api_client/types/scoring_contract_parameters_param.py b/src/runloop_api_client/types/scoring_contract_parameters_param.py
new file mode 100644
index 000000000..a28a34b4e
--- /dev/null
+++ b/src/runloop_api_client/types/scoring_contract_parameters_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Required, TypedDict
+
+from .scoring_function_parameters_param import ScoringFunctionParametersParam
+
+__all__ = ["ScoringContractParametersParam"]
+
+
+class ScoringContractParametersParam(TypedDict, total=False):
+ scoring_function_parameters: Required[Iterable[ScoringFunctionParametersParam]]
+ """A list of scoring functions used to evaluate the Scenario."""
diff --git a/src/runloop_api_client/types/scoring_contract_result_view.py b/src/runloop_api_client/types/scoring_contract_result_view.py
new file mode 100644
index 000000000..823de83c4
--- /dev/null
+++ b/src/runloop_api_client/types/scoring_contract_result_view.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .scoring_function_result_view import ScoringFunctionResultView
+
+__all__ = ["ScoringContractResultView"]
+
+
+class ScoringContractResultView(BaseModel):
+ score: float
+ """Total score for all scoring contracts. This will be a value between 0 and 1."""
+
+ scoring_function_results: List[ScoringFunctionResultView]
+ """List of all individual scoring function results."""
diff --git a/src/runloop_api_client/types/scoring_function_parameters.py b/src/runloop_api_client/types/scoring_function_parameters.py
new file mode 100644
index 000000000..815d88e6a
--- /dev/null
+++ b/src/runloop_api_client/types/scoring_function_parameters.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["ScoringFunctionParameters"]
+
+
+class ScoringFunctionParameters(BaseModel):
+ name: str
+ """Name of scoring function."""
+
+ weight: float
+ """Wight to apply to scoring function score.
+
+ Weights of all scoring functions should sum to 1.0.
+ """
+
+ bash_script: Optional[str] = None
+ """
+ A single bash script that sets up the environment, scores, and prints the final
+ score to standard out. Score should be an integer between 0 and 100, and look
+ like "score=[0..100].
+ """
diff --git a/src/runloop_api_client/types/scoring_function_parameters_param.py b/src/runloop_api_client/types/scoring_function_parameters_param.py
new file mode 100644
index 000000000..594760fd1
--- /dev/null
+++ b/src/runloop_api_client/types/scoring_function_parameters_param.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ScoringFunctionParametersParam"]
+
+
+class ScoringFunctionParametersParam(TypedDict, total=False):
+ name: Required[str]
+ """Name of scoring function."""
+
+ weight: Required[float]
+ """Wight to apply to scoring function score.
+
+ Weights of all scoring functions should sum to 1.0.
+ """
+
+ bash_script: Optional[str]
+ """
+ A single bash script that sets up the environment, scores, and prints the final
+ score to standard out. Score should be an integer between 0 and 100, and look
+ like "score=[0..100].
+ """
diff --git a/src/runloop_api_client/types/scoring_function_result_view.py b/src/runloop_api_client/types/scoring_function_result_view.py
new file mode 100644
index 000000000..7fcbcd50c
--- /dev/null
+++ b/src/runloop_api_client/types/scoring_function_result_view.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .._models import BaseModel
+
+__all__ = ["ScoringFunctionResultView"]
+
+
+class ScoringFunctionResultView(BaseModel):
+ output: str
+ """Log output of the scoring function."""
+
+ score: float
+ """Final score for the given scoring function."""
+
+ scoring_function_name: str
+ """Scoring function name that ran."""
diff --git a/tests/api_resources/benchmarks/__init__.py b/tests/api_resources/benchmarks/__init__.py
new file mode 100644
index 000000000..fd8019a9a
--- /dev/null
+++ b/tests/api_resources/benchmarks/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/benchmarks/test_runs.py b/tests/api_resources/benchmarks/test_runs.py
new file mode 100644
index 000000000..6f7ac0b8d
--- /dev/null
+++ b/tests/api_resources/benchmarks/test_runs.py
@@ -0,0 +1,240 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from runloop_api_client import Runloop, AsyncRunloop
+from runloop_api_client.types import BenchmarkRunView, BenchmarkRunListView
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRuns:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: Runloop) -> None:
+ run = client.benchmarks.runs.retrieve(
+ "id",
+ )
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Runloop) -> None:
+ response = client.benchmarks.runs.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Runloop) -> None:
+ with client.benchmarks.runs.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.benchmarks.runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: Runloop) -> None:
+ run = client.benchmarks.runs.list()
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: Runloop) -> None:
+ run = client.benchmarks.runs.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: Runloop) -> None:
+ response = client.benchmarks.runs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: Runloop) -> None:
+ with client.benchmarks.runs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_complete(self, client: Runloop) -> None:
+ run = client.benchmarks.runs.complete(
+ "id",
+ )
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_complete(self, client: Runloop) -> None:
+ response = client.benchmarks.runs.with_raw_response.complete(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_complete(self, client: Runloop) -> None:
+ with client.benchmarks.runs.with_streaming_response.complete(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_complete(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.benchmarks.runs.with_raw_response.complete(
+ "",
+ )
+
+
+class TestAsyncRuns:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.benchmarks.runs.retrieve(
+ "id",
+ )
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.runs.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.runs.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.benchmarks.runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.benchmarks.runs.list()
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.benchmarks.runs.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.runs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.runs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(BenchmarkRunListView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_complete(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.benchmarks.runs.complete(
+ "id",
+ )
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_complete(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.runs.with_raw_response.complete(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_complete(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.runs.with_streaming_response.complete(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(BenchmarkRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_complete(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.benchmarks.runs.with_raw_response.complete(
+ "",
+ )
diff --git a/tests/api_resources/devboxes/test_lsp.py b/tests/api_resources/devboxes/test_lsp.py
index 8f02db8d8..05e7f4b2c 100644
--- a/tests/api_resources/devboxes/test_lsp.py
+++ b/tests/api_resources/devboxes/test_lsp.py
@@ -129,7 +129,7 @@ def test_method_code_actions_with_all_params(self, client: Runloop) -> None:
},
},
"code": 0,
- "code_description": {"href": "href"},
+ "code_description": {"href": "string"},
"data": {},
"related_information": [
{
@@ -144,7 +144,7 @@ def test_method_code_actions_with_all_params(self, client: Runloop) -> None:
"line": 0,
},
},
- "uri": "uri",
+ "uri": "string",
},
"message": "message",
}
@@ -208,7 +208,7 @@ def test_path_params_code_actions(self, client: Runloop) -> None:
def test_method_diagnostics(self, client: Runloop) -> None:
lsp = client.devboxes.lsp.diagnostics(
id="id",
- uri="uri",
+ uri="string",
)
assert_matches_type(DiagnosticsResponse, lsp, path=["response"])
@@ -216,7 +216,7 @@ def test_method_diagnostics(self, client: Runloop) -> None:
def test_raw_response_diagnostics(self, client: Runloop) -> None:
response = client.devboxes.lsp.with_raw_response.diagnostics(
id="id",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -228,7 +228,7 @@ def test_raw_response_diagnostics(self, client: Runloop) -> None:
def test_streaming_response_diagnostics(self, client: Runloop) -> None:
with client.devboxes.lsp.with_streaming_response.diagnostics(
id="id",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -243,14 +243,14 @@ def test_path_params_diagnostics(self, client: Runloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
client.devboxes.lsp.with_raw_response.diagnostics(
id="",
- uri="uri",
+ uri="string",
)
@parametrize
def test_method_document_symbols(self, client: Runloop) -> None:
lsp = client.devboxes.lsp.document_symbols(
id="id",
- uri="uri",
+ uri="string",
)
assert_matches_type(object, lsp, path=["response"])
@@ -258,7 +258,7 @@ def test_method_document_symbols(self, client: Runloop) -> None:
def test_raw_response_document_symbols(self, client: Runloop) -> None:
response = client.devboxes.lsp.with_raw_response.document_symbols(
id="id",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -270,7 +270,7 @@ def test_raw_response_document_symbols(self, client: Runloop) -> None:
def test_streaming_response_document_symbols(self, client: Runloop) -> None:
with client.devboxes.lsp.with_streaming_response.document_symbols(
id="id",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -285,14 +285,14 @@ def test_path_params_document_symbols(self, client: Runloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
client.devboxes.lsp.with_raw_response.document_symbols(
id="",
- uri="uri",
+ uri="string",
)
@parametrize
def test_method_file(self, client: Runloop) -> None:
lsp = client.devboxes.lsp.file(
id="id",
- path="path",
+ path="string",
)
assert_matches_type(FileContentsResponse, lsp, path=["response"])
@@ -300,7 +300,7 @@ def test_method_file(self, client: Runloop) -> None:
def test_raw_response_file(self, client: Runloop) -> None:
response = client.devboxes.lsp.with_raw_response.file(
id="id",
- path="path",
+ path="string",
)
assert response.is_closed is True
@@ -312,7 +312,7 @@ def test_raw_response_file(self, client: Runloop) -> None:
def test_streaming_response_file(self, client: Runloop) -> None:
with client.devboxes.lsp.with_streaming_response.file(
id="id",
- path="path",
+ path="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -327,7 +327,7 @@ def test_path_params_file(self, client: Runloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
client.devboxes.lsp.with_raw_response.file(
id="",
- path="path",
+ path="string",
)
@parametrize
@@ -422,7 +422,7 @@ def test_path_params_files(self, client: Runloop) -> None:
def test_method_formatting(self, client: Runloop) -> None:
lsp = client.devboxes.lsp.formatting(
id="id",
- uri="uri",
+ uri="string",
)
assert_matches_type(object, lsp, path=["response"])
@@ -430,7 +430,7 @@ def test_method_formatting(self, client: Runloop) -> None:
def test_raw_response_formatting(self, client: Runloop) -> None:
response = client.devboxes.lsp.with_raw_response.formatting(
id="id",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -442,7 +442,7 @@ def test_raw_response_formatting(self, client: Runloop) -> None:
def test_streaming_response_formatting(self, client: Runloop) -> None:
with client.devboxes.lsp.with_streaming_response.formatting(
id="id",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -457,7 +457,7 @@ def test_path_params_formatting(self, client: Runloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
client.devboxes.lsp.with_raw_response.formatting(
id="",
- uri="uri",
+ uri="string",
)
@parametrize
@@ -583,7 +583,7 @@ def test_method_get_code_segment_info(self, client: Runloop) -> None:
lsp = client.devboxes.lsp.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
)
assert_matches_type(CodeSegmentInfoResponse, lsp, path=["response"])
@@ -592,7 +592,7 @@ def test_method_get_code_segment_info_with_all_params(self, client: Runloop) ->
lsp = client.devboxes.lsp.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
symbol_type="function",
)
assert_matches_type(CodeSegmentInfoResponse, lsp, path=["response"])
@@ -602,7 +602,7 @@ def test_raw_response_get_code_segment_info(self, client: Runloop) -> None:
response = client.devboxes.lsp.with_raw_response.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -615,7 +615,7 @@ def test_streaming_response_get_code_segment_info(self, client: Runloop) -> None
with client.devboxes.lsp.with_streaming_response.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -631,7 +631,7 @@ def test_path_params_get_code_segment_info(self, client: Runloop) -> None:
client.devboxes.lsp.with_raw_response.get_code_segment_info(
id="",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
)
@parametrize
@@ -776,7 +776,7 @@ def test_path_params_references(self, client: Runloop) -> None:
def test_method_set_watch_directory(self, client: Runloop) -> None:
lsp = client.devboxes.lsp.set_watch_directory(
id="id",
- path="path",
+ path="string",
)
assert_matches_type(str, lsp, path=["response"])
@@ -784,7 +784,7 @@ def test_method_set_watch_directory(self, client: Runloop) -> None:
def test_raw_response_set_watch_directory(self, client: Runloop) -> None:
response = client.devboxes.lsp.with_raw_response.set_watch_directory(
id="id",
- path="path",
+ path="string",
)
assert response.is_closed is True
@@ -796,7 +796,7 @@ def test_raw_response_set_watch_directory(self, client: Runloop) -> None:
def test_streaming_response_set_watch_directory(self, client: Runloop) -> None:
with client.devboxes.lsp.with_streaming_response.set_watch_directory(
id="id",
- path="path",
+ path="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -811,7 +811,7 @@ def test_path_params_set_watch_directory(self, client: Runloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
client.devboxes.lsp.with_raw_response.set_watch_directory(
id="",
- path="path",
+ path="string",
)
@@ -921,7 +921,7 @@ async def test_method_code_actions_with_all_params(self, async_client: AsyncRunl
},
},
"code": 0,
- "code_description": {"href": "href"},
+ "code_description": {"href": "string"},
"data": {},
"related_information": [
{
@@ -936,7 +936,7 @@ async def test_method_code_actions_with_all_params(self, async_client: AsyncRunl
"line": 0,
},
},
- "uri": "uri",
+ "uri": "string",
},
"message": "message",
}
@@ -1000,7 +1000,7 @@ async def test_path_params_code_actions(self, async_client: AsyncRunloop) -> Non
async def test_method_diagnostics(self, async_client: AsyncRunloop) -> None:
lsp = await async_client.devboxes.lsp.diagnostics(
id="id",
- uri="uri",
+ uri="string",
)
assert_matches_type(DiagnosticsResponse, lsp, path=["response"])
@@ -1008,7 +1008,7 @@ async def test_method_diagnostics(self, async_client: AsyncRunloop) -> None:
async def test_raw_response_diagnostics(self, async_client: AsyncRunloop) -> None:
response = await async_client.devboxes.lsp.with_raw_response.diagnostics(
id="id",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -1020,7 +1020,7 @@ async def test_raw_response_diagnostics(self, async_client: AsyncRunloop) -> Non
async def test_streaming_response_diagnostics(self, async_client: AsyncRunloop) -> None:
async with async_client.devboxes.lsp.with_streaming_response.diagnostics(
id="id",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1035,14 +1035,14 @@ async def test_path_params_diagnostics(self, async_client: AsyncRunloop) -> None
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
await async_client.devboxes.lsp.with_raw_response.diagnostics(
id="",
- uri="uri",
+ uri="string",
)
@parametrize
async def test_method_document_symbols(self, async_client: AsyncRunloop) -> None:
lsp = await async_client.devboxes.lsp.document_symbols(
id="id",
- uri="uri",
+ uri="string",
)
assert_matches_type(object, lsp, path=["response"])
@@ -1050,7 +1050,7 @@ async def test_method_document_symbols(self, async_client: AsyncRunloop) -> None
async def test_raw_response_document_symbols(self, async_client: AsyncRunloop) -> None:
response = await async_client.devboxes.lsp.with_raw_response.document_symbols(
id="id",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -1062,7 +1062,7 @@ async def test_raw_response_document_symbols(self, async_client: AsyncRunloop) -
async def test_streaming_response_document_symbols(self, async_client: AsyncRunloop) -> None:
async with async_client.devboxes.lsp.with_streaming_response.document_symbols(
id="id",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1077,14 +1077,14 @@ async def test_path_params_document_symbols(self, async_client: AsyncRunloop) ->
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
await async_client.devboxes.lsp.with_raw_response.document_symbols(
id="",
- uri="uri",
+ uri="string",
)
@parametrize
async def test_method_file(self, async_client: AsyncRunloop) -> None:
lsp = await async_client.devboxes.lsp.file(
id="id",
- path="path",
+ path="string",
)
assert_matches_type(FileContentsResponse, lsp, path=["response"])
@@ -1092,7 +1092,7 @@ async def test_method_file(self, async_client: AsyncRunloop) -> None:
async def test_raw_response_file(self, async_client: AsyncRunloop) -> None:
response = await async_client.devboxes.lsp.with_raw_response.file(
id="id",
- path="path",
+ path="string",
)
assert response.is_closed is True
@@ -1104,7 +1104,7 @@ async def test_raw_response_file(self, async_client: AsyncRunloop) -> None:
async def test_streaming_response_file(self, async_client: AsyncRunloop) -> None:
async with async_client.devboxes.lsp.with_streaming_response.file(
id="id",
- path="path",
+ path="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1119,7 +1119,7 @@ async def test_path_params_file(self, async_client: AsyncRunloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
await async_client.devboxes.lsp.with_raw_response.file(
id="",
- path="path",
+ path="string",
)
@parametrize
@@ -1214,7 +1214,7 @@ async def test_path_params_files(self, async_client: AsyncRunloop) -> None:
async def test_method_formatting(self, async_client: AsyncRunloop) -> None:
lsp = await async_client.devboxes.lsp.formatting(
id="id",
- uri="uri",
+ uri="string",
)
assert_matches_type(object, lsp, path=["response"])
@@ -1222,7 +1222,7 @@ async def test_method_formatting(self, async_client: AsyncRunloop) -> None:
async def test_raw_response_formatting(self, async_client: AsyncRunloop) -> None:
response = await async_client.devboxes.lsp.with_raw_response.formatting(
id="id",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -1234,7 +1234,7 @@ async def test_raw_response_formatting(self, async_client: AsyncRunloop) -> None
async def test_streaming_response_formatting(self, async_client: AsyncRunloop) -> None:
async with async_client.devboxes.lsp.with_streaming_response.formatting(
id="id",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1249,7 +1249,7 @@ async def test_path_params_formatting(self, async_client: AsyncRunloop) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
await async_client.devboxes.lsp.with_raw_response.formatting(
id="",
- uri="uri",
+ uri="string",
)
@parametrize
@@ -1375,7 +1375,7 @@ async def test_method_get_code_segment_info(self, async_client: AsyncRunloop) ->
lsp = await async_client.devboxes.lsp.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
)
assert_matches_type(CodeSegmentInfoResponse, lsp, path=["response"])
@@ -1384,7 +1384,7 @@ async def test_method_get_code_segment_info_with_all_params(self, async_client:
lsp = await async_client.devboxes.lsp.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
symbol_type="function",
)
assert_matches_type(CodeSegmentInfoResponse, lsp, path=["response"])
@@ -1394,7 +1394,7 @@ async def test_raw_response_get_code_segment_info(self, async_client: AsyncRunlo
response = await async_client.devboxes.lsp.with_raw_response.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
)
assert response.is_closed is True
@@ -1407,7 +1407,7 @@ async def test_streaming_response_get_code_segment_info(self, async_client: Asyn
async with async_client.devboxes.lsp.with_streaming_response.get_code_segment_info(
id="id",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1423,7 +1423,7 @@ async def test_path_params_get_code_segment_info(self, async_client: AsyncRunloo
await async_client.devboxes.lsp.with_raw_response.get_code_segment_info(
id="",
symbol_name="symbolName",
- uri="uri",
+ uri="string",
)
@parametrize
@@ -1568,7 +1568,7 @@ async def test_path_params_references(self, async_client: AsyncRunloop) -> None:
async def test_method_set_watch_directory(self, async_client: AsyncRunloop) -> None:
lsp = await async_client.devboxes.lsp.set_watch_directory(
id="id",
- path="path",
+ path="string",
)
assert_matches_type(str, lsp, path=["response"])
@@ -1576,7 +1576,7 @@ async def test_method_set_watch_directory(self, async_client: AsyncRunloop) -> N
async def test_raw_response_set_watch_directory(self, async_client: AsyncRunloop) -> None:
response = await async_client.devboxes.lsp.with_raw_response.set_watch_directory(
id="id",
- path="path",
+ path="string",
)
assert response.is_closed is True
@@ -1588,7 +1588,7 @@ async def test_raw_response_set_watch_directory(self, async_client: AsyncRunloop
async def test_streaming_response_set_watch_directory(self, async_client: AsyncRunloop) -> None:
async with async_client.devboxes.lsp.with_streaming_response.set_watch_directory(
id="id",
- path="path",
+ path="string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -1603,5 +1603,5 @@ async def test_path_params_set_watch_directory(self, async_client: AsyncRunloop)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
await async_client.devboxes.lsp.with_raw_response.set_watch_directory(
id="",
- path="path",
+ path="string",
)
diff --git a/tests/api_resources/scenarios/__init__.py b/tests/api_resources/scenarios/__init__.py
new file mode 100644
index 000000000..fd8019a9a
--- /dev/null
+++ b/tests/api_resources/scenarios/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/scenarios/test_runs.py b/tests/api_resources/scenarios/test_runs.py
new file mode 100644
index 000000000..0fa9873eb
--- /dev/null
+++ b/tests/api_resources/scenarios/test_runs.py
@@ -0,0 +1,316 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from runloop_api_client import Runloop, AsyncRunloop
+from runloop_api_client.types import ScenarioRunView, ScenarioRunListView
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestRuns:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: Runloop) -> None:
+ run = client.scenarios.runs.retrieve(
+ "id",
+ )
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Runloop) -> None:
+ response = client.scenarios.runs.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Runloop) -> None:
+ with client.scenarios.runs.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.scenarios.runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: Runloop) -> None:
+ run = client.scenarios.runs.list()
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: Runloop) -> None:
+ run = client.scenarios.runs.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: Runloop) -> None:
+ response = client.scenarios.runs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: Runloop) -> None:
+ with client.scenarios.runs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_complete(self, client: Runloop) -> None:
+ run = client.scenarios.runs.complete(
+ "id",
+ )
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_complete(self, client: Runloop) -> None:
+ response = client.scenarios.runs.with_raw_response.complete(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_complete(self, client: Runloop) -> None:
+ with client.scenarios.runs.with_streaming_response.complete(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_complete(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.scenarios.runs.with_raw_response.complete(
+ "",
+ )
+
+ @parametrize
+ def test_method_score(self, client: Runloop) -> None:
+ run = client.scenarios.runs.score(
+ "id",
+ )
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ def test_raw_response_score(self, client: Runloop) -> None:
+ response = client.scenarios.runs.with_raw_response.score(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ def test_streaming_response_score(self, client: Runloop) -> None:
+ with client.scenarios.runs.with_streaming_response.score(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_score(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.scenarios.runs.with_raw_response.score(
+ "",
+ )
+
+
+class TestAsyncRuns:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.scenarios.runs.retrieve(
+ "id",
+ )
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.runs.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.runs.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.scenarios.runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.scenarios.runs.list()
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.scenarios.runs.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.runs.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.runs.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(ScenarioRunListView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_complete(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.scenarios.runs.complete(
+ "id",
+ )
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_complete(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.runs.with_raw_response.complete(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_complete(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.runs.with_streaming_response.complete(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_complete(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.scenarios.runs.with_raw_response.complete(
+ "",
+ )
+
+ @parametrize
+ async def test_method_score(self, async_client: AsyncRunloop) -> None:
+ run = await async_client.scenarios.runs.score(
+ "id",
+ )
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ async def test_raw_response_score(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.runs.with_raw_response.score(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ run = await response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_score(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.runs.with_streaming_response.score(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ run = await response.parse()
+ assert_matches_type(ScenarioRunView, run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_score(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.scenarios.runs.with_raw_response.score(
+ "",
+ )
diff --git a/tests/api_resources/test_benchmarks.py b/tests/api_resources/test_benchmarks.py
new file mode 100644
index 000000000..0e1735e07
--- /dev/null
+++ b/tests/api_resources/test_benchmarks.py
@@ -0,0 +1,324 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from runloop_api_client import Runloop, AsyncRunloop
+from runloop_api_client.types import (
+ BenchmarkView,
+ BenchmarkRunView,
+ BenchmarkListView,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestBenchmarks:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.create(
+ name="name",
+ )
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.create(
+ name="name",
+ scenario_ids=["string"],
+ )
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: Runloop) -> None:
+ response = client.benchmarks.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: Runloop) -> None:
+ with client.benchmarks.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.retrieve(
+ "id",
+ )
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Runloop) -> None:
+ response = client.benchmarks.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Runloop) -> None:
+ with client.benchmarks.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.benchmarks.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.list()
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: Runloop) -> None:
+ response = client.benchmarks.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: Runloop) -> None:
+ with client.benchmarks.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_start_run(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.start_run(
+ benchmark_id="benchmark_id",
+ )
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ @parametrize
+ def test_method_start_run_with_all_params(self, client: Runloop) -> None:
+ benchmark = client.benchmarks.start_run(
+ benchmark_id="benchmark_id",
+ run_name="run_name",
+ )
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ @parametrize
+ def test_raw_response_start_run(self, client: Runloop) -> None:
+ response = client.benchmarks.with_raw_response.start_run(
+ benchmark_id="benchmark_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ @parametrize
+ def test_streaming_response_start_run(self, client: Runloop) -> None:
+ with client.benchmarks.with_streaming_response.start_run(
+ benchmark_id="benchmark_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = response.parse()
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncBenchmarks:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.create(
+ name="name",
+ )
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.create(
+ name="name",
+ scenario_ids=["string"],
+ )
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.retrieve(
+ "id",
+ )
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.benchmarks.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.list()
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkListView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_start_run(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.start_run(
+ benchmark_id="benchmark_id",
+ )
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_method_start_run_with_all_params(self, async_client: AsyncRunloop) -> None:
+ benchmark = await async_client.benchmarks.start_run(
+ benchmark_id="benchmark_id",
+ run_name="run_name",
+ )
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_raw_response_start_run(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.benchmarks.with_raw_response.start_run(
+ benchmark_id="benchmark_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_start_run(self, async_client: AsyncRunloop) -> None:
+ async with async_client.benchmarks.with_streaming_response.start_run(
+ benchmark_id="benchmark_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ benchmark = await response.parse()
+ assert_matches_type(BenchmarkRunView, benchmark, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_scenarios.py b/tests/api_resources/test_scenarios.py
new file mode 100644
index 000000000..509542559
--- /dev/null
+++ b/tests/api_resources/test_scenarios.py
@@ -0,0 +1,408 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from runloop_api_client import Runloop, AsyncRunloop
+from runloop_api_client.types import (
+ ScenarioView,
+ ScenarioRunView,
+ ScenarioListView,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestScenarios:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: Runloop) -> None:
+ scenario = client.scenarios.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ }
+ ]
+ },
+ )
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: Runloop) -> None:
+ scenario = client.scenarios.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ "bash_script": "bash_script",
+ }
+ ]
+ },
+ environment_parameters={
+ "blueprint_id": "blueprint_id",
+ "prebuilt_id": "prebuilt_id",
+ "snapshot_id": "snapshot_id",
+ },
+ )
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: Runloop) -> None:
+ response = client.scenarios.with_raw_response.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ }
+ ]
+ },
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: Runloop) -> None:
+ with client.scenarios.with_streaming_response.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ }
+ ]
+ },
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: Runloop) -> None:
+ scenario = client.scenarios.retrieve(
+ "id",
+ )
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Runloop) -> None:
+ response = client.scenarios.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Runloop) -> None:
+ with client.scenarios.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: Runloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.scenarios.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: Runloop) -> None:
+ scenario = client.scenarios.list()
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: Runloop) -> None:
+ scenario = client.scenarios.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: Runloop) -> None:
+ response = client.scenarios.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = response.parse()
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: Runloop) -> None:
+ with client.scenarios.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = response.parse()
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_start_run(self, client: Runloop) -> None:
+ scenario = client.scenarios.start_run(
+ scenario_id="scenario_id",
+ )
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ @parametrize
+ def test_method_start_run_with_all_params(self, client: Runloop) -> None:
+ scenario = client.scenarios.start_run(
+ scenario_id="scenario_id",
+ benchmark_run_id="benchmark_run_id",
+ run_name="run_name",
+ )
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ @parametrize
+ def test_raw_response_start_run(self, client: Runloop) -> None:
+ response = client.scenarios.with_raw_response.start_run(
+ scenario_id="scenario_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = response.parse()
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ @parametrize
+ def test_streaming_response_start_run(self, client: Runloop) -> None:
+ with client.scenarios.with_streaming_response.start_run(
+ scenario_id="scenario_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = response.parse()
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncScenarios:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ }
+ ]
+ },
+ )
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ "bash_script": "bash_script",
+ }
+ ]
+ },
+ environment_parameters={
+ "blueprint_id": "blueprint_id",
+ "prebuilt_id": "prebuilt_id",
+ "snapshot_id": "snapshot_id",
+ },
+ )
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.with_raw_response.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ }
+ ]
+ },
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = await response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.with_streaming_response.create(
+ input_context={"problem_statement": "problem_statement"},
+ name="name",
+ scoring_contract={
+ "scoring_function_parameters": [
+ {
+ "name": "name",
+ "weight": 0,
+ }
+ ]
+ },
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = await response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.retrieve(
+ "id",
+ )
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.with_raw_response.retrieve(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = await response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.with_streaming_response.retrieve(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = await response.parse()
+ assert_matches_type(ScenarioView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncRunloop) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.scenarios.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.list()
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.list(
+ limit=0,
+ starting_after="starting_after",
+ )
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = await response.parse()
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = await response.parse()
+ assert_matches_type(ScenarioListView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_start_run(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.start_run(
+ scenario_id="scenario_id",
+ )
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ @parametrize
+ async def test_method_start_run_with_all_params(self, async_client: AsyncRunloop) -> None:
+ scenario = await async_client.scenarios.start_run(
+ scenario_id="scenario_id",
+ benchmark_run_id="benchmark_run_id",
+ run_name="run_name",
+ )
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ @parametrize
+ async def test_raw_response_start_run(self, async_client: AsyncRunloop) -> None:
+ response = await async_client.scenarios.with_raw_response.start_run(
+ scenario_id="scenario_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scenario = await response.parse()
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_start_run(self, async_client: AsyncRunloop) -> None:
+ async with async_client.scenarios.with_streaming_response.start_run(
+ scenario_id="scenario_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scenario = await response.parse()
+ assert_matches_type(ScenarioRunView, scenario, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/test_client.py b/tests/test_client.py
index befb5ec6a..81c247f78 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -6,6 +6,7 @@
import os
import sys
import json
+import time
import asyncio
import inspect
import subprocess
@@ -1721,10 +1722,20 @@ async def test_main() -> None:
[sys.executable, "-c", test_code],
text=True,
) as process:
- try:
- process.wait(2)
- if process.returncode:
- raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
- except subprocess.TimeoutExpired as e:
- process.kill()
- raise AssertionError("calling get_platform using asyncify resulted in a hung process") from e
+ timeout = 10 # seconds
+
+ start_time = time.monotonic()
+ while True:
+ return_code = process.poll()
+ if return_code is not None:
+ if return_code != 0:
+ raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
+
+ # success
+ break
+
+ if time.monotonic() - start_time > timeout:
+ process.kill()
+ raise AssertionError("calling get_platform using asyncify resulted in a hung process")
+
+ time.sleep(0.1)