diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..d3847246 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +* @a-alferov @Sealwing +*.py @a-alferov @Sealwing @DanBalalan @Starovoitov diff --git a/.github/workflows/on_push_to_pull_request.yaml b/.github/workflows/on_push_to_pull_request.yaml new file mode 100644 index 00000000..f80ed36a --- /dev/null +++ b/.github/workflows/on_push_to_pull_request.yaml @@ -0,0 +1,16 @@ +name: Validate Pull Request Changes + +# most important for us is "synchronize" event for pull request, which is included by default +on: pull_request + +jobs: + lint: + name: Lint Python code + uses: ./.github/workflows/step_lint.yaml + + unit_tests: + name: Run unit tests + uses: ./.github/workflows/step_test_from_dir.yaml + with: + target: tests/unit + description: Unit diff --git a/.github/workflows/step_lint.yaml b/.github/workflows/step_lint.yaml new file mode 100644 index 00000000..44eb7d29 --- /dev/null +++ b/.github/workflows/step_lint.yaml @@ -0,0 +1,23 @@ +name: Run Linters + +on: + workflow_call: + +jobs: + lint-python: + name: Lint Python Code + runs-on: ubuntu-24.04 + env: + CODE_DIRS: "adcm_aio_client tests" + steps: + - name: Install poetry + run: python -m pip install poetry + - uses: actions/checkout@v4 + - name: Install dependencies + run: poetry install --with dev --with test --no-root + - name: Run ruff lint check + run: poetry run ruff check $CODE_DIRS + - name: Run ruff format check + run: poetry run ruff format --check $CODE_DIRS + - name: Run pyright check + run: poetry run pyright $CODE_DIRS diff --git a/.github/workflows/step_test_from_dir.yaml b/.github/workflows/step_test_from_dir.yaml new file mode 100644 index 00000000..07f607d5 --- /dev/null +++ b/.github/workflows/step_test_from_dir.yaml @@ -0,0 +1,32 @@ +name: Run Tests +run-name: "Run Tests: ${{ inputs.description }}" + +on: + workflow_call: + inputs: + target: + type: string + required: true + description: "Directory with tests to aim to" + description: + type: string + required: false + default: "unspecified" + description: "Name to use in `run-name` for tests to be more specific" + +jobs: + run-pytest-in-dir: + name: Run Tests + runs-on: ubuntu-24.04 + env: + CODE_DIRS: "adcm_aio_client tests" + steps: + - name: Install poetry + run: python -m pip install poetry + - uses: actions/checkout@v4 + - name: Install dependencies + # install "with root" so adcm_aio_client + # will be accessible without PYTHONPATH manipulations + run: poetry install --with test + - name: Run tests + run: poetry run pytest ${{ inputs.target }} -v diff --git a/.gitignore b/.gitignore index 82f92755..7b6caf34 100644 --- a/.gitignore +++ b/.gitignore @@ -159,4 +159,4 @@ cython_debug/ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ +.idea/ diff --git a/adcm_aio_client/__init__.py b/adcm_aio_client/__init__.py new file mode 100644 index 00000000..ab4d2777 --- /dev/null +++ b/adcm_aio_client/__init__.py @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from adcm_aio_client._session import ADCMSession + +__all__ = ["ADCMSession"] diff --git a/adcm_aio_client/_session.py b/adcm_aio_client/_session.py new file mode 100644 index 00000000..ec5cc16b --- /dev/null +++ b/adcm_aio_client/_session.py @@ -0,0 +1,143 @@ +from json import JSONDecodeError +from types import TracebackType +from typing import Self + +import httpx +import adcm_version + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.errors import ClientInitError, NotSupportedVersionError +from adcm_aio_client.core.requesters import BundleRetriever, DefaultRequester +from adcm_aio_client.core.types import Cert, ConnectionSecurity, Credentials, RequestPolicy, RetryPolicy, SessionInfo + +MIN_ADCM_VERSION = "2.5.0" + + +class ADCMSession: + def __init__( + self: Self, + # basics + url: str, + credentials: Credentials, + *, + # security + verify: str | bool = True, + cert: Cert | None = None, + # requesting behavior + timeout: int = 600, + retry_attempts: int = 3, + retry_interval: int = 1, + ) -> None: + self._session_info = SessionInfo( + url=url, credentials=credentials, security=ConnectionSecurity(verify=verify, certificate=cert) + ) + self._request_policy = RequestPolicy( + timeout=timeout, retry=RetryPolicy(attempts=retry_attempts, interval=retry_interval) + ) + + self._http_client = None + self._requester = None + self._adcm_client = None + + # Context Manager + + async def __aenter__(self: Self) -> ADCMClient: + self._http_client = await self._prepare_http_client_for_running_adcm() + adcm_version_ = await _ensure_adcm_version_is_supported(client=self._http_client) + + try: + self._requester = self._prepare_api_v2_requester() + await self._requester.login(self._session_info.credentials) + except Exception as e: + await self.__close_http_client_safe(exc_type=type(e), exc_value=e) + raise + + self._adcm_client = self._prepare_adcm_client(version=adcm_version_) + return self._adcm_client + + async def __aexit__( + self: Self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + await self.__close_requester_safe(exc_type, exc_value, traceback) + await self.__close_http_client_safe(exc_type, exc_value, traceback) + + async def __close_requester_safe( + self: Self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + if self._requester: + try: + await self._requester.logout() + except: + await self.__close_http_client_safe(exc_type, exc_value, traceback) + + raise + + async def __close_http_client_safe( + self: Self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: TracebackType | None = None, + ) -> None: + if self._http_client: + await self._http_client.__aexit__(exc_type, exc_value, traceback) + + # Steps + + async def _prepare_http_client_for_running_adcm(self: Self) -> httpx.AsyncClient: + client = httpx.AsyncClient( + base_url=self._session_info.url, + timeout=self._request_policy.timeout, + 
verify=self._session_info.security.verify,
+            cert=self._session_info.security.certificate,
+        )
+
+        try:
+            await client.head("/")
+        except httpx.NetworkError as e:
+            await client.__aexit__(type(e), e)
+            message = f"Failed to connect to ADCM at URL {self._session_info.url}"
+            raise ClientInitError(message) from e
+
+        return client
+
+    def _prepare_api_v2_requester(self: Self) -> DefaultRequester:
+        if self._http_client is None:
+            message = "Failed to prepare requester: HTTP client is not initialized"
+            raise RuntimeError(message)
+
+        return DefaultRequester(http_client=self._http_client, retries=self._request_policy.retry)
+
+    def _prepare_adcm_client(self: Self, version: str) -> ADCMClient:
+        if self._requester is None:
+            message = "Failed to prepare ADCM client: requester is not initialized"
+            raise RuntimeError(message)
+
+        bundle_retriever = BundleRetriever()
+
+        return ADCMClient(requester=self._requester, bundle_retriever=bundle_retriever, adcm_version=version)
+
+
+async def _ensure_adcm_version_is_supported(client: httpx.AsyncClient) -> str:
+    try:
+        # todo: check whether this request raises an error on VERY old versions
+        response = await client.get("/versions/")
+        data = response.json()
+        version = str(data["adcm"]["version"])
+    except (JSONDecodeError, KeyError) as e:
+        message = (
+            f"Failed to detect ADCM version at {client.base_url}. "
+            f"Most likely ADCM version is older than {MIN_ADCM_VERSION}"
+        )
+        raise NotSupportedVersionError(message) from e
+
+    if adcm_version.compare_adcm_versions(version, MIN_ADCM_VERSION) < 0:
+        message = f"Minimal supported ADCM version is {MIN_ADCM_VERSION}. Got {version}"
+        raise NotSupportedVersionError(message)
+
+    return version
diff --git a/adcm_aio_client/core/__init__.py b/adcm_aio_client/core/__init__.py
new file mode 100644
index 00000000..4d9a9249
--- /dev/null
+++ b/adcm_aio_client/core/__init__.py
@@ -0,0 +1,11 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
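Taken together, ADCMSession above is meant to be used as an async context manager: on enter it checks the ADCM version and logs in, on exit it logs out and closes the HTTP client. A minimal bootstrap sketch follows; the Credentials field names and the accessor's all() method are assumptions based on common patterns, not confirmed by this diff:

import asyncio

from adcm_aio_client import ADCMSession
from adcm_aio_client.core.types import Credentials


async def main() -> None:
    credentials = Credentials(username="admin", password="admin")  # field names assumed

    # enter: version check + login; exit: logout + HTTP client close
    async with ADCMSession(url="http://127.0.0.1:8000", credentials=credentials) as client:
        clusters = await client.clusters.all()  # accessor method assumed
        print([cluster.id for cluster in clusters])


asyncio.run(main())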
diff --git a/adcm_aio_client/core/actions/__init__.py b/adcm_aio_client/core/actions/__init__.py new file mode 100644 index 00000000..10a45c9f --- /dev/null +++ b/adcm_aio_client/core/actions/__init__.py @@ -0,0 +1,3 @@ +from adcm_aio_client.core.actions._objects import ActionsAccessor, UpgradeNode + +__all__ = ["ActionsAccessor", "UpgradeNode"] diff --git a/adcm_aio_client/core/actions/_objects.py b/adcm_aio_client/core/actions/_objects.py new file mode 100644 index 00000000..f595a017 --- /dev/null +++ b/adcm_aio_client/core/actions/_objects.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +from functools import cached_property +from typing import TYPE_CHECKING, Any, Self + +from asyncstdlib import cached_property as async_cached_property + +from adcm_aio_client.core.config._objects import ActionConfig +from adcm_aio_client.core.config.types import ConfigData +from adcm_aio_client.core.errors import ( + ConflictError, + HostNotInClusterError, + NoConfigInActionError, + NoMappingInActionError, + ObjectBlockedError, +) +from adcm_aio_client.core.filters import FilterByDisplayName, FilterByName, Filtering +from adcm_aio_client.core.mapping import ActionMapping +from adcm_aio_client.core.objects._accessors import NonPaginatedChildAccessor +from adcm_aio_client.core.objects._base import InteractiveChildObject, InteractiveObject + +if TYPE_CHECKING: + from adcm_aio_client.core.objects.cm import Bundle, Cluster, Job + + +class Action(InteractiveChildObject): + PATH_PREFIX = "actions" + + def __init__(self: Self, parent: InteractiveObject, data: dict[str, Any]) -> None: + super().__init__(parent, data) + self._verbose = False + self._blocking = True + + @property + def verbose(self: Self) -> bool: + return self._verbose + + @verbose.setter + def verbose(self: Self, value: bool) -> bool: + self._verbose = value + return self._verbose + + @property + def blocking(self: Self) -> bool: + return self._blocking + + @blocking.setter + def blocking(self: Self, value: bool) -> bool: + self._blocking = value + return self._blocking + + @cached_property + def name(self: Self) -> str: + return self._data["name"] + + @cached_property + def display_name(self: Self) -> str: + return self._data["displayName"] + + async def run(self: Self) -> Job: + from adcm_aio_client.core.objects.cm import Job + + await self._ensure_rich_data() + + data = {"isVerbose": self._verbose, "shouldBlockObject": self._blocking} + if self._has_mapping: + mapping = await self.mapping + data |= {"hostComponentMap": mapping._to_payload()} + if self._has_config: + config = await self.config + data |= {"configuration": config._to_payload()} + + try: + response = await self._requester.post(*self.get_own_path(), "run", data=data) + except ConflictError as e: + if "has issue" in str(e): + raise ObjectBlockedError(*e.args) from None + raise + + return Job(requester=self._requester, data=response.as_dict()) + + @async_cached_property + async def mapping(self: Self) -> ActionMapping: + await self._ensure_rich_data() + + if not self._has_mapping: + message = f"Action {self.display_name} doesn't allow mapping changes" + raise NoMappingInActionError(message) + + cluster = await detect_cluster(owner=self._parent) + mapping = await cluster.mapping + entries = mapping.all() + + return ActionMapping(owner=self._parent, cluster=cluster, entries=entries) + + @async_cached_property + async def config(self: Self) -> ActionConfig: + await self._ensure_rich_data() + + if not self._has_config: + message = f"Action {self.display_name} doesn't allow 
config changes" + raise NoConfigInActionError(message) + + configuration = self._configuration + data = ConfigData.from_v2_response(data_in_v2_format=configuration) + schema = configuration["configSchema"] + + return ActionConfig(schema=schema, config=data, parent=self) + + @property + def _is_full_data_loaded(self: Self) -> bool: + return "hostComponentMapRules" in self._data + + @property + def _has_mapping(self: Self) -> bool: + return bool(self._mapping_rule) + + @property + def _has_config(self: Self) -> bool: + return bool(self._configuration) + + @property + def _mapping_rule(self: Self) -> list[dict]: + try: + return self._data["hostComponentMapRules"] + except KeyError as e: + message = ( + "Failed to retrieve mapping rules. " + "Most likely action was initialized with partial data." + " Need to load all data" + ) + raise KeyError(message) from e + + @property + def _configuration(self: Self) -> dict: + try: + return self._data["configuration"] + except KeyError as e: + message = ( + "Failed to retrieve configuration section. " + "Most likely action was initialized with partial data." + " Need to load all data" + ) + raise KeyError(message) from e + + async def _ensure_rich_data(self: Self) -> None: + if self._is_full_data_loaded: + return + + self._data = await self._retrieve_data() + + +class ActionsAccessor[Parent: InteractiveObject](NonPaginatedChildAccessor[Parent, Action]): + class_type = Action + filtering = Filtering(FilterByName, FilterByDisplayName) + + +class Upgrade(Action): + PATH_PREFIX = "upgrades" + + @property + def bundle(self: Self) -> Bundle: + from adcm_aio_client.core.objects.cm import Bundle + + return Bundle(requester=self._requester, data=self._data["bundle"]) + + +class UpgradeNode(NonPaginatedChildAccessor): + class_type = Upgrade + filtering = Filtering(FilterByName, FilterByDisplayName) + + +async def detect_cluster(owner: InteractiveObject) -> Cluster: + from adcm_aio_client.core.objects.cm import Cluster, Component, Host, Service + + if isinstance(owner, Cluster): + return owner + + if isinstance(owner, (Service, Component)): + return owner.cluster + + if isinstance(owner, Host): + cluster = await owner.cluster + if cluster is None: + message = f"Host {owner.name} isn't bound to cluster " "or it's not refreshed" + raise HostNotInClusterError(message) + + return cluster + + message = f"No cluster in hierarchy for {owner}" + raise RuntimeError(message) diff --git a/adcm_aio_client/core/client.py b/adcm_aio_client/core/client.py new file mode 100644 index 00000000..2992fc63 --- /dev/null +++ b/adcm_aio_client/core/client.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from functools import cached_property +from typing import Self + +from adcm_aio_client.core.objects.cm import ADCM, BundlesNode, ClustersNode, HostProvidersNode, HostsNode, JobsNode +from adcm_aio_client.core.requesters import BundleRetrieverInterface, Requester + +MIN_ADCM_VERSION = "2.5.0" + + +class ADCMClient: + def __init__( + self: Self, requester: Requester, bundle_retriever: BundleRetrieverInterface, adcm_version: str + ) -> None: + self._requester = requester + self._retrieve_bundle_from_remote_url = bundle_retriever + self._adcm_version = adcm_version + + @cached_property + def clusters(self: Self) -> ClustersNode: + return ClustersNode(path=("clusters",), requester=self._requester) + + @cached_property + def hosts(self: Self) -> HostsNode: + return HostsNode(path=("hosts",), requester=self._requester) + + @cached_property + def hostproviders(self: Self) -> HostProvidersNode: + return HostProvidersNode(path=("hostproviders",), requester=self._requester) + + @cached_property + def adcm(self: Self) -> ADCM: + return ADCM(requester=self._requester, data={}, version=self._adcm_version) + + @cached_property + def bundles(self: Self) -> BundlesNode: + return BundlesNode( + path=("bundles",), requester=self._requester, retriever=self._retrieve_bundle_from_remote_url + ) + + @cached_property + def jobs(self: Self) -> JobsNode: + return JobsNode(path=("tasks",), requester=self._requester) diff --git a/adcm_aio_client/core/config/__init__.py b/adcm_aio_client/core/config/__init__.py new file mode 100644 index 00000000..d597b330 --- /dev/null +++ b/adcm_aio_client/core/config/__init__.py @@ -0,0 +1,25 @@ +from adcm_aio_client.core.config._objects import ( + ActionConfig, + ActivatableParameterGroup, + ActivatableParameterGroupHG, + ConfigHistoryNode, + HostGroupConfig, + ObjectConfig, + Parameter, + ParameterGroup, + ParameterGroupHG, + ParameterHG, +) + +__all__ = [ + "ConfigHistoryNode", + "ObjectConfig", + "ActionConfig", + "HostGroupConfig", + "Parameter", + "ParameterHG", + "ParameterGroup", + "ParameterGroupHG", + "ActivatableParameterGroup", + "ActivatableParameterGroupHG", +] diff --git a/adcm_aio_client/core/config/_objects.py b/adcm_aio_client/core/config/_objects.py new file mode 100644 index 00000000..f5fdbdfb --- /dev/null +++ b/adcm_aio_client/core/config/_objects.py @@ -0,0 +1,550 @@ +from copy import deepcopy +from functools import partial +from typing import Any, Callable, Coroutine, Protocol, Self, overload +import json +import asyncio + +from adcm_aio_client.core.config._operations import find_config_difference +from adcm_aio_client.core.config.refresh import apply_local_changes +from adcm_aio_client.core.config.types import ( + AnyParameterName, + ConfigData, + ConfigDifference, + ConfigRefreshStrategy, + ConfigSchema, + LevelNames, + LocalConfigs, +) +from adcm_aio_client.core.errors import ( + BadRequestError, + ConfigComparisonError, + ConfigNoParameterError, + InvalidConfigError, + RequesterError, +) +from adcm_aio_client.core.types import AwareOfOwnPath, WithRequesterProperty + + +class ConfigOwner(WithRequesterProperty, AwareOfOwnPath, Protocol): ... 
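For orientation, here is a hedged sketch of how the client nodes and config ownership compose: every ADCMClient node is built once (cached_property) and bound to a fixed API path, and any object satisfying ConfigOwner (a requester plus knowledge of its own path) can own configurations. The get accessor method and the config_history attribute are assumptions inferred from the accessor and ConfigHistoryNode classes, not spelled out here:

async def show_current_config(client: ADCMClient) -> None:
    # ClustersNode is bound to the "/clusters" path at client construction;
    # note that jobs are backed by the "tasks" endpoint
    cluster = await client.clusters.get(name__eq="example")  # method assumed

    # a cluster satisfies ConfigOwner: it has a requester and knows its own path
    config = await cluster.config_history.current()  # attribute name assumed
    print(config.id, config.description)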
+ + +# Config Entries Wrappers + + +class _ConfigWrapper: + __slots__ = ("_name", "_schema", "_data") + + def __init__( + self: Self, + name: LevelNames, + data: ConfigData, + schema: ConfigSchema, + ) -> None: + self._name = name + self._schema = schema + self._data = data + + def _on_data_change(self: Self) -> None: + pass + + +class _Group(_ConfigWrapper): + __slots__ = ("_name", "_schema", "_data", "_wrappers_cache") + + def __init__(self: Self, name: LevelNames, data: ConfigData, schema: ConfigSchema) -> None: + super().__init__(name, data, schema) + self._wrappers_cache = {} + + def _find_and_wrap_config_entry[ValueW: _ConfigWrapper, GroupW: _ConfigWrapper, AGroupW: _ConfigWrapper]( + self: Self, + item: AnyParameterName | tuple[AnyParameterName, type[ValueW | GroupW | AGroupW]], + value_class: type[ValueW], + group_class: type[GroupW], + a_group_class: type[AGroupW], + ) -> ValueW | GroupW | AGroupW: + if isinstance(item, str): + name = item + else: + name, *_ = item + + level_name = self._schema.get_level_name(group=self._name, display_name=name) + if level_name is None: + level_name = name + + cached_wrapper = self._wrappers_cache.get(level_name) + if cached_wrapper: + return cached_wrapper + + parameter_full_name = (*self._name, level_name) + + if not self._schema.is_visible_parameter(parameter_full_name): + message = f"No parameter named {name}" + if self._name: + message = f"{message} in group {'/'.join(self._name)}" + raise ConfigNoParameterError(message) + + class_ = value_class + if self._schema.is_group(parameter_full_name): + class_ = a_group_class if self._schema.is_activatable_group(parameter_full_name) else group_class + + wrapper = class_(name=parameter_full_name, data=self._data, schema=self._schema) + + self._wrappers_cache[level_name] = wrapper + + return wrapper + + def _on_data_change(self: Self) -> None: + # need to drop caches when data is changed, + # because each entry may already point to a different data + # and return incorrect nodes for a search (=> can't be edited too) + self._wrappers_cache = {} + + +class Parameter[T](_ConfigWrapper): + @property + def value(self: Self) -> T: + # todo probably want to return read-only proxies for list/dict + try: + return self._data.get_value(parameter=self._name) + except (TypeError, KeyError): + if len(self._name) == 1: + # not in any sort of group, should continue with exception + raise + + return self._schema.get_default(self._name) + + def set(self: Self, value: Any) -> Self: # noqa: ANN401 + try: + self._data.set_value(parameter=self._name, value=value) + except (TypeError, KeyError) as err: + if len(self._name) == 1: + # not in any sort of group, should continue with exception + raise + + self._set_parent_groups_to_defaults(err=err) + self._data.set_value(parameter=self._name, value=value) + + return self + + def _set_parent_groups_to_defaults(self: Self, err: Exception) -> None: + # find first `None` group + root_group_name, *rest = self._name[:-1] + group = (root_group_name,) + + while rest: + value_ = self._data.get_value(group) + if value_ is None: + break + + next_group_name, *rest = rest + group = (*group, next_group_name) + + value_ = self._data.get_value(group) + if value_ is not None: + # error was legit and not about None group + raise err + + # actually build defaults + defaults = self._schema.get_default(group) + self._data.set_value(group, defaults) + + +class _Desyncable(_ConfigWrapper): + def sync(self: Self) -> Self: + self._data.set_attribute(parameter=self._name, attribute="isSynced", 
value=True) + return self + + def desync(self: Self) -> Self: + self._data.set_attribute(parameter=self._name, attribute="isSynced", value=False) + return self + + +class ParameterHG[T](_Desyncable, Parameter[T]): + def set(self: Self, value: Any) -> Self: # noqa: ANN401 + super().set(value) + self.desync() + return self + + +class ParameterGroup(_Group): + @overload + def __getitem__[ExpectedType: "ConfigEntry"]( + self: Self, item: tuple[AnyParameterName, type[ExpectedType]] + ) -> ExpectedType: ... + + @overload + def __getitem__(self: Self, item: AnyParameterName) -> "ConfigEntry": ... + + def __getitem__[ExpectedType: "ConfigEntry"]( + self: Self, item: AnyParameterName | tuple[AnyParameterName, type[ExpectedType]] + ) -> "ConfigEntry": + """ + Get config entry by given display name (or "technical" name). + + Item is either a string (name) or tuple with name on first position + and type info at second. + + NOTE: types aren't checked, they are just helpers for users' type checking setups. + """ + return self._find_and_wrap_config_entry( + item=item, value_class=Parameter, group_class=ParameterGroup, a_group_class=ActivatableParameterGroup + ) + + +class ParameterGroupHG(_Group): + @overload + def __getitem__[ExpectedType: "ConfigEntryHG"]( + self: Self, item: tuple[AnyParameterName, type[ExpectedType]] + ) -> ExpectedType: ... + + @overload + def __getitem__(self: Self, item: AnyParameterName) -> "ConfigEntryHG": ... + + def __getitem__[ExpectedType: "ConfigEntryHG"]( + self: Self, item: AnyParameterName | tuple[AnyParameterName, type[ExpectedType]] + ) -> "ConfigEntryHG": + """ + Get config entry by given display name (or "technical" name). + + Item is either a string (name) or tuple with name on first position + and type info at second. + + NOTE: types aren't checked, they are just helpers for users' type checking setups. + """ + return self._find_and_wrap_config_entry( + item=item, + value_class=ParameterHG, + group_class=ParameterGroupHG, + a_group_class=ActivatableParameterGroupHG, + ) + + +class _Activatable(_Group): + def activate(self: Self) -> Self: + self._data.set_attribute(parameter=self._name, attribute="isActive", value=True) + return self + + def deactivate(self: Self) -> Self: + self._data.set_attribute(parameter=self._name, attribute="isActive", value=False) + return self + + +class ActivatableParameterGroup(_Activatable, ParameterGroup): ... + + +class ActivatableParameterGroupHG(_Desyncable, _Activatable, ParameterGroup): + def activate(self: Self) -> Self: + super().activate() + self.desync() + return self + + def deactivate(self: Self) -> Self: + super().deactivate() + self.desync() + return self + + +class _ConfigWrapperCreator(_ConfigWrapper): + @property + def config(self: Self) -> ConfigData: + return self._data + + def change_data(self: Self, new_data: ConfigData) -> ConfigData: + self._data = new_data + self._on_data_change() + return self._data + + +class ObjectConfigWrapper(ParameterGroup, _ConfigWrapperCreator): ... + + +class HostGroupConfigWrapper(ParameterGroupHG, _ConfigWrapperCreator): ... 
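The wrapper classes above enable the typed lookup pattern below — an illustrative sketch with made-up parameter names; as the docstrings note, the type argument only helps type checkers and isn't enforced at runtime:

def tweak(config: ParameterGroup) -> None:
    # plain parameter: look up by display name or technical name, then set
    config["Worker Threads", Parameter].set(8)  # parameter names are hypothetical

    # nested group lookups return group wrappers that can be indexed further
    config["logging", ParameterGroup]["level", Parameter].set("DEBUG")

    # activatable groups can be switched on and off
    config["Advanced", ActivatableParameterGroup].activate()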
+ + +type ConfigEntry = Parameter | ParameterGroup | ActivatableParameterGroup +type ConfigEntryHG = ParameterHG | ParameterGroupHG | ActivatableParameterGroupHG + +# API Objects + + +class _GeneralConfig[T: _ConfigWrapperCreator]: + __slots__ = ("_schema", "_parent", "_initial_config", "_current_config", "_wrapper_class") + + _wrapper_class: type[T] + + def __init__(self: Self, config: ConfigData, schema: ConfigSchema, parent: ConfigOwner) -> None: + self._schema = schema + self._initial_config: ConfigData = self._parse_json_fields_inplace_safe(config) + self._current_config = self._wrapper_class(data=deepcopy(self._initial_config), schema=self._schema, name=()) + self._parent = parent + + # Public Interface (for End User) + + @property + def id(self: Self) -> int: + return self._initial_config.id + + @property + def description(self: Self) -> str: + return self._initial_config.description + + def reset(self: Self) -> Self: + self._current_config.change_data(new_data=deepcopy(self._initial_config)) + return self + + def difference(self: Self, other: Self, *, other_is_previous: bool = True) -> ConfigDifference: + if self.schema != other.schema: + message = f"Schema of configuration {other.id} doesn't match schema of {self.id}" + raise ConfigComparisonError(message) + + if other_is_previous: + previous = other + current = self + else: + previous = self + current = other + + full_diff = find_config_difference(previous=previous.data, current=current.data, schema=self._schema) + return ConfigDifference.from_full_format(full_diff) + + # Public For Internal Use Only + + @property + def schema(self: Self) -> ConfigSchema: + return self._schema + + @property + def data(self: Self) -> ConfigData: + return self._current_config.config + + # Private + def _parse_json_fields_inplace_safe(self: Self, config: ConfigData) -> ConfigData: + return self._apply_to_all_json_fields(func=json.loads, when=lambda value: isinstance(value, str), config=config) + + def _serialize_json_fields_inplace_safe(self: Self, config: ConfigData) -> ConfigData: + return self._apply_to_all_json_fields(func=json.dumps, when=lambda value: value is not None, config=config) + + def _apply_to_all_json_fields( + self: Self, func: Callable, when: Callable[[Any], bool], config: ConfigData + ) -> ConfigData: + for parameter_name in self._schema.json_fields: + input_value = config.get_value(parameter_name) + if when(input_value): + parsed_value = func(input_value) + config.set_value(parameter_name, parsed_value) + + return config + + async def _retrieve_current_config(self: Self) -> ConfigData: + configs_path = (*self._parent.get_own_path(), "configs") + + history_response = await self._parent.requester.get( + *configs_path, query={"ordering": "-id", "limit": 5, "offset": 0} + ) + + current_config_entry = get_current_config(results=history_response.as_dict()["results"]) + config_id = current_config_entry["id"] + + if config_id == self.id: + return self._initial_config + + config_response = await self._parent.requester.get(*configs_path, config_id) + + config_data = ConfigData.from_v2_response(data_in_v2_format=config_response.as_dict()) + + return self._parse_json_fields_inplace_safe(config_data) + + +class _SaveableConfig[T: _ConfigWrapperCreator](_GeneralConfig[T]): + async def refresh(self: Self, strategy: ConfigRefreshStrategy = apply_local_changes) -> Self: + remote_config = await retrieve_current_config( + parent=self._parent, get_schema=partial(retrieve_schema, parent=self._parent) + ) + if self.schema != remote_config.schema: + 
message = "Can't refresh configuration after upgrade: schema is different for local and remote configs" + raise ConfigComparisonError(message) + + local = LocalConfigs(initial=self._initial_config, changed=self._current_config.config) + merged_config = strategy(local=local, remote=remote_config.data, schema=self._schema) + + self._initial_config = remote_config.data + self._current_config.change_data(new_data=merged_config) + + return self + + async def save(self: Self, description: str = "") -> Self: + config_to_save = self._current_config.config + self._serialize_json_fields_inplace_safe(config_to_save) + payload = {"description": description, "config": config_to_save.values, "adcmMeta": config_to_save.attributes} + + try: + response = await self._parent.requester.post(*self._parent.get_own_path(), "configs", data=payload) + except RequesterError as e: + # config isn't saved, no data update is in play, + # returning "pre-saved" parsed values + self._parse_json_fields_inplace_safe(config_to_save) + if isinstance(e, BadRequestError): + raise InvalidConfigError(*e.args) from None + raise + + try: + response = await self._parent.requester.post(*self._parent.get_own_path(), "configs", data=payload) + except RequesterError as e: + # config isn't saved, no data update is in play, + # returning "pre-saved" parsed values + self._parse_json_fields_inplace_safe(config_to_save) + + if isinstance(e, BadRequestError): + raise InvalidConfigError(*e.args) from None + raise + else: + new_config = ConfigData.from_v2_response(data_in_v2_format=response.as_dict()) + self._initial_config = self._parse_json_fields_inplace_safe(new_config) + self.reset() + + return self + + +class ActionConfig(_GeneralConfig[ObjectConfigWrapper]): + _wrapper_class = ObjectConfigWrapper + + @overload + def __getitem__[ExpectedType: ConfigEntry]( + self: Self, item: tuple[AnyParameterName, type[ExpectedType]] + ) -> ExpectedType: ... + + @overload + def __getitem__(self: Self, item: AnyParameterName) -> ConfigEntry: ... + + def __getitem__[ExpectedType: ConfigEntry]( + self: Self, item: AnyParameterName | tuple[AnyParameterName, type[ExpectedType]] + ) -> ConfigEntry: + return self._current_config[item] + + def _to_payload(self: Self) -> dict: + # don't want complexity of regular config with rollbacks on failure + config_to_save = deepcopy(self._current_config.config) + self._serialize_json_fields_inplace_safe(config_to_save) + return {"config": config_to_save.values, "adcmMeta": config_to_save.attributes} + + +class ObjectConfig(_SaveableConfig[ObjectConfigWrapper]): + _wrapper_class = ObjectConfigWrapper + + # todo fix typing copy-paste + @overload + def __getitem__[ExpectedType: ConfigEntry]( + self: Self, item: tuple[AnyParameterName, type[ExpectedType]] + ) -> ExpectedType: ... + + @overload + def __getitem__(self: Self, item: AnyParameterName) -> ConfigEntry: ... + + def __getitem__[ExpectedType: ConfigEntry]( + self: Self, item: AnyParameterName | tuple[AnyParameterName, type[ExpectedType]] + ) -> ConfigEntry: + return self._current_config[item] + + +class HostGroupConfig(_SaveableConfig[HostGroupConfigWrapper]): + _wrapper_class = HostGroupConfigWrapper + + @overload + def __getitem__[ExpectedType: ConfigEntryHG]( + self: Self, item: tuple[AnyParameterName, type[ExpectedType]] + ) -> ExpectedType: ... + + @overload + def __getitem__(self: Self, item: AnyParameterName) -> ConfigEntryHG: ... 
+
+    def __getitem__[ExpectedType: ConfigEntryHG](
+        self: Self, item: AnyParameterName | tuple[AnyParameterName, type[ExpectedType]]
+    ) -> "ConfigEntryHG":
+        return self._current_config[item]
+
+
+class ConfigHistoryNode:
+    def __init__(self: Self, parent: ConfigOwner) -> None:
+        self._schema: ConfigSchema | None = None
+        self._parent = parent
+
+    async def current(self: Self) -> ObjectConfig:
+        return await retrieve_current_config(parent=self._parent, get_schema=self._ensure_schema)
+
+    async def __getitem__(self: Self, position: int) -> ObjectConfig:
+        # since we don't have dates here, we sort by id
+        ordering = "id"
+        offset = position
+        if offset < 0:
+            ordering = "-id"
+            # `-1` is the same as `0` in reverse order
+            offset = abs(offset) - 1
+
+        query = {"limit": 1, "offset": offset, "ordering": ordering}
+
+        return await retrieve_config(
+            parent=self._parent, get_schema=self._ensure_schema, query=query, choose_suitable_config=get_first_result
+        )
+
+    async def _ensure_schema(self: Self) -> ConfigSchema:
+        if self._schema is not None:
+            return self._schema
+
+        self._schema = await retrieve_schema(parent=self._parent)
+
+        return self._schema
+
+
+type GetSchemaFunc = Callable[[], Coroutine[Any, Any, ConfigSchema]]
+
+
+async def retrieve_schema(parent: ConfigOwner) -> ConfigSchema:
+    response = await parent.requester.get(*parent.get_own_path(), "config-schema")
+    return ConfigSchema(spec_as_jsonschema=response.as_dict())
+
+
+async def retrieve_current_config(parent: ConfigOwner, get_schema: GetSchemaFunc) -> ObjectConfig:
+    # we rely on the current configuration being among the last created
+    query = {"ordering": "-id", "limit": 10, "offset": 0}
+    return await retrieve_config(
+        parent=parent, get_schema=get_schema, query=query, choose_suitable_config=get_current_config
+    )
+
+
+async def retrieve_config(
+    parent: ConfigOwner,
+    get_schema: GetSchemaFunc,
+    query: dict,
+    choose_suitable_config: Callable[[list[dict]], dict],
+) -> ObjectConfig:
+    schema_task = asyncio.create_task(get_schema())
+
+    path = (*parent.get_own_path(), "configs")
+
+    config_records_response = await parent.requester.get(*path, query=query)
+    config_record = choose_suitable_config(config_records_response.as_dict()["results"])
+
+    config_data_response = await parent.requester.get(*path, config_record["id"])
+    config_data = ConfigData.from_v2_response(data_in_v2_format=config_data_response.as_dict())
+
+    schema = await schema_task
+
+    return ObjectConfig(config=config_data, schema=schema, parent=parent)
+
+
+def get_first_result(results: list[dict]) -> dict:
+    try:
+        return results[0]
+    except IndexError as e:
+        message = "Configuration can't be found"
+        raise RuntimeError(message) from e
+
+
+def get_current_config(results: list[dict]) -> dict:
+    for config in results:
+        if config["isCurrent"]:
+            return config
+
+    message = "Failed to determine current configuration"
+    raise RuntimeError(message)
diff --git a/adcm_aio_client/core/config/_operations.py b/adcm_aio_client/core/config/_operations.py
new file mode 100644
index 00000000..5cf639a4
--- /dev/null
+++ b/adcm_aio_client/core/config/_operations.py
@@ -0,0 +1,56 @@
+from adcm_aio_client.core.config.types import (
+    ConfigSchema,
+    FullConfigDifference,
+    GenericConfigData,
+    LevelNames,
+    ValueChange,
+    full_name_to_level_names,
+)
+
+
+# Difference
+def find_config_difference(
+    previous: GenericConfigData, current: GenericConfigData, schema: ConfigSchema
+) -> FullConfigDifference:
+    diff = FullConfigDifference(schema=schema)
+
+    _fill_values_diff_at_level(level=(), diff=diff, previous=previous.values, current=current.values)
+    _fill_attributes_diff(diff=diff, previous=previous.attributes, current=current.attributes)
+
+    return diff
+
+
+def _fill_values_diff_at_level(level: LevelNames, diff: FullConfigDifference, previous: dict, current: dict) -> None:
+    missing = object()
+    for key, cur_value in current.items():
+        level_names = (*level, key)
+        prev_value = previous.get(key, missing)
+
+        if prev_value is missing:
+            # there may be collision between two None's, but for now we'll consider it a "special case"
+            diff.values[level_names] = ValueChange(previous=None, current=cur_value)
+            continue
+
+        if cur_value == prev_value:
+            continue
+
+        if not (diff.schema.is_group(level_names) and isinstance(prev_value, dict) and isinstance(cur_value, dict)):
+            diff.values[level_names] = ValueChange(previous=prev_value, current=cur_value)
+            continue
+
+        _fill_values_diff_at_level(diff=diff, level=level_names, previous=prev_value, current=cur_value)
+
+
+def _fill_attributes_diff(diff: FullConfigDifference, previous: dict, current: dict) -> None:
+    missing = object()
+    for full_name, cur_value in current.items():
+        prev_value = previous.get(full_name, missing)
+        if cur_value == prev_value:
+            continue
+
+        level_names = full_name_to_level_names(full_name)
+
+        if prev_value is missing:
+            prev_value = None
+
+        diff.attributes[level_names] = ValueChange(previous=prev_value, current=cur_value)
diff --git a/adcm_aio_client/core/config/refresh.py b/adcm_aio_client/core/config/refresh.py
new file mode 100644
index 00000000..c67062b9
--- /dev/null
+++ b/adcm_aio_client/core/config/refresh.py
@@ -0,0 +1,70 @@
+from adcm_aio_client.core.config._operations import find_config_difference
+from adcm_aio_client.core.config.types import ConfigData, ConfigSchema, LocalConfigs
+
+
+def apply_local_changes(local: LocalConfigs, remote: ConfigData, schema: ConfigSchema) -> ConfigData:
+    if local.initial.id == remote.id:
+        return local.changed
+
+    local_diff = find_config_difference(previous=local.initial, current=local.changed, schema=schema)
+    if local_diff.is_empty:
+        # no changes, nothing to apply
+        return remote
+
+    for parameter_name, value_change in local_diff.values.items():
+        remote.set_value(parameter=parameter_name, value=value_change.current)
+
+    for parameter_name, attribute_change in local_diff.attributes.items():
+        if not isinstance(attribute_change.current, dict):
+            message = f"Can't apply attribute changes of type {type(attribute_change.current)}, expected dict-like"
+            raise TypeError(message)
+
+        for attribute_name, value in attribute_change.current.items():
+            remote.set_attribute(parameter=parameter_name, attribute=attribute_name, value=value)
+
+    return remote
+
+
+def apply_remote_changes(local: LocalConfigs, remote: ConfigData, schema: ConfigSchema) -> ConfigData:
+    if local.initial.id == remote.id:
+        return local.changed
+
+    local_diff = find_config_difference(previous=local.initial, current=local.changed, schema=schema)
+    if local_diff.is_empty:
+        return remote
+
+    remote_diff = find_config_difference(previous=local.initial, current=remote, schema=schema)
+
+    locally_changed = set(local_diff.values.keys())
+    changed_in_both = locally_changed.intersection(remote_diff.values.keys())
+    changed_locally_only = locally_changed.difference(remote_diff.values.keys())
+
+    for parameter_name in changed_in_both:
+        remote.set_value(parameter=parameter_name, value=remote_diff.values[parameter_name].current)
+
+    for parameter_name in changed_locally_only:
+        remote.set_value(parameter=parameter_name, value=local_diff.values[parameter_name].current)
+
+    locally_changed = set(local_diff.attributes.keys())
+    changed_in_both = locally_changed.intersection(remote_diff.attributes.keys())
+    changed_locally_only = locally_changed.difference(remote_diff.attributes.keys())
+
+    for parameter_name in changed_in_both:
+        attribute_change = remote_diff.attributes[parameter_name]
+        if not isinstance(attribute_change.current, dict):
+            message = f"Can't apply attribute changes of type {type(attribute_change.current)}, expected dict-like"
+            raise TypeError(message)
+
+        for attribute_name, value in attribute_change.current.items():
+            remote.set_attribute(parameter=parameter_name, attribute=attribute_name, value=value)
+
+    for parameter_name in changed_locally_only:
+        attribute_change = local_diff.attributes[parameter_name]
+        if not isinstance(attribute_change.current, dict):
+            message = f"Can't apply attribute changes of type {type(attribute_change.current)}, expected dict-like"
+            raise TypeError(message)
+
+        for attribute_name, value in attribute_change.current.items():
+            remote.set_attribute(parameter=parameter_name, attribute=attribute_name, value=value)
+
+    return remote
diff --git a/adcm_aio_client/core/config/types.py b/adcm_aio_client/core/config/types.py
new file mode 100644
index 00000000..0a7d782d
--- /dev/null
+++ b/adcm_aio_client/core/config/types.py
@@ -0,0 +1,341 @@
+from abc import ABC
+from collections import defaultdict
+from dataclasses import dataclass, field
+from functools import reduce
+from typing import Any, Callable, Iterable, NamedTuple, Protocol, Self
+
+# External Section
+# these functions are heavily inspired by configuration rework in ADCM (ADCM-6034)
+
+
+type ParameterName = str
+type ParameterDisplayName = str
+type AnyParameterName = ParameterName | ParameterDisplayName
+
+type LevelNames = tuple[ParameterName, ...]
+type ParameterFullName = str
+"""
+Name including all level names joined with (and prefixed by) `/`
+"""
+
+ROOT_PREFIX = "/"
+
+
+def set_nested_config_value[T](config: dict[str, Any], level_names: LevelNames, value: T) -> T:
+    group, level_name = get_group_with_value(config=config, level_names=level_names)
+    group[level_name] = value
+    return value
+
+
+def change_nested_config_value[T](config: dict[str, Any], level_names: LevelNames, func: Callable[[Any], T]) -> T:
+    group, level_name = get_group_with_value(config=config, level_names=level_names)
+    group[level_name] = func(group[level_name])
+    return group[level_name]
+
+
+def get_nested_config_value(config: dict[str, Any], level_names: LevelNames) -> Any:  # noqa: ANN401
+    group, level_name = get_group_with_value(config=config, level_names=level_names)
+    return group[level_name]
+
+
+def get_group_with_value(config: dict[str, Any], level_names: LevelNames) -> tuple[dict[str, Any], ParameterName]:
+    return _get_group_with_value(config=config, level_names=level_names)
+
+
+def _get_group_with_value(
+    config: dict[str, Any], level_names: Iterable[ParameterName]
+) -> tuple[dict[str, Any], ParameterName]:
+    level_name, *rest = level_names
+    if not rest:
+        return config, level_name
+
+    return _get_group_with_value(config=config[level_name], level_names=rest)
+
+
+def level_names_to_full_name(levels: LevelNames) -> str:
+    return ensure_full_name("/".join(levels))
+
+
+def full_name_to_level_names(full: ParameterFullName) -> tuple[ParameterName, ...]:
+    return tuple(filter(bool, full.split("/")))
+
+
+def ensure_full_name(name: str) -> str:
+    if not name.startswith(ROOT_PREFIX):
+        return f"{ROOT_PREFIX}{name}"
+
+    return name
+
+
+# External Section End
+
+
+class GenericConfigData(ABC):  # noqa: B024
+    __slots__ = ("_values", "_attributes")
+
+    def __init__(self: Self, values: dict, attributes: dict) -> None:
+        self._values = values
+        self._attributes = attributes
+
+    @property
+    def values(self: Self) -> dict:
+        return self._values
+
+    @property
+    def attributes(self: Self) -> dict:
+        return self._attributes
+
+    def get_value(self: Self, parameter: LevelNames) -> Any:  # noqa: ANN401
+        return get_nested_config_value(config=self._values, level_names=parameter)
+
+    def set_value[T](self: Self, parameter: LevelNames, value: T) -> T:
+        return set_nested_config_value(config=self._values, level_names=parameter, value=value)
+
+    def get_attribute(self: Self, parameter: LevelNames, attribute: str) -> bool:
+        full_name = level_names_to_full_name(parameter)
+        return self._attributes[full_name][attribute]
+
+    def set_attribute(self: Self, parameter: LevelNames, attribute: str, value: bool) -> bool:  # noqa: FBT001
+        full_name = level_names_to_full_name(parameter)
+        self._attributes[full_name][attribute] = value
+        return value
+
+
+class ActionConfigData(GenericConfigData):
+    __slots__ = GenericConfigData.__slots__
+
+
+class ConfigData(GenericConfigData):
+    __slots__ = ("id", "description", "_values", "_attributes")
+
+    def __init__(self: Self, id: int, description: str, values: dict, attributes: dict) -> None:  # noqa: A002
+        self.id = id
+        self.description = description
+        super().__init__(values=values, attributes=attributes)
+
+    @classmethod
+    def from_v2_response(cls: type[Self], data_in_v2_format: dict) -> Self:
+        return cls(
+            id=int(data_in_v2_format["id"]),
+            description=str(data_in_v2_format["description"]),
+            values=data_in_v2_format["config"],
+            attributes=data_in_v2_format["adcmMeta"],
+        )
+
+
+@dataclass(slots=True)
+class ValueChange:
+    
previous: Any + current: Any + + +def recursive_defaultdict() -> defaultdict: + return defaultdict(recursive_defaultdict) + + +@dataclass(slots=True) +class FullConfigDifference: + schema: "ConfigSchema" + values: dict[LevelNames, ValueChange] = field(default_factory=dict) + attributes: dict[LevelNames, ValueChange] = field(default_factory=dict) + + @property + def is_empty(self: Self) -> bool: + return not bool(self.values or self.attributes) + + +class ConfigDifference: + __slots__ = ("_schema", "_values", "_attributes") + + def __init__( + self: Self, + schema: "ConfigSchema", + values: dict[LevelNames, ValueChange], + attributes: dict[LevelNames, ValueChange], + ) -> None: + self._schema = schema + self._values = values + self._attributes = attributes + + @classmethod + def from_full_format(cls: type[Self], diff: FullConfigDifference) -> Self: + visible_value_changes = {k: v for k, v in diff.values.items() if not diff.schema.is_invisible(k)} + visible_attr_changes = {k: v for k, v in diff.attributes.items() if not diff.schema.is_invisible(k)} + return cls(schema=diff.schema, values=visible_value_changes, attributes=visible_attr_changes) + + def __str__(self: Self) -> str: + values_nested = self._to_nested_dict(self._values) + attributes_nested = self._to_nested_dict(self._attributes) + + if not (values_nested or attributes_nested): + return "No Changes" + + values_repr = f"Changed Values:\n{values_nested}" if values_nested else "" + attributes_repr = f"Changed Attributes:\n{attributes_nested}" if attributes_nested else "" + + return "\n\n".join((values_repr, attributes_repr)) + + def _to_nested_dict(self: Self, changes: dict[LevelNames, ValueChange]) -> dict: + result = recursive_defaultdict() + + for names, change in changes.items(): + changes_repr = self._prepare_change(change) + + if len(names) == 1: + result[names[0]] = changes_repr + continue + + *groups, name = names + group_node = reduce(dict.__getitem__, groups, result) + group_node[name] = changes_repr + + # get rid of `defaultdict` in favor of `dict` + # may be not optimal + return self._simplify_dict(result) + + def _prepare_change(self: Self, change: ValueChange) -> tuple | dict: + if not (isinstance(change.previous, dict) and isinstance(change.current, dict)): + return (change.previous, change.current) + + dict_diff = {} + + for key, cur_value in change.current.items(): + prev_value = change.previous.get(key) + if prev_value != cur_value: + dict_diff[key] = self._prepare_change(change=ValueChange(previous=prev_value, current=cur_value)) + + missing_in_current = set(change.previous.keys()).difference(change.current.keys()) + for key in missing_in_current: + dict_diff[key] = self._prepare_change(change=ValueChange(previous=change.previous[key], current=None)) + + return dict_diff + + def _simplify_dict(self: Self, dd: dict) -> dict: + simplified = {} + + for k, v in dd.items(): + if isinstance(v, dict): + v = self._simplify_dict(v) + + simplified[k] = v + + return simplified + + +class ConfigSchema: + def __init__(self: Self, spec_as_jsonschema: dict) -> None: + self._raw = spec_as_jsonschema + + self._jsons: set[LevelNames] = set() + self._groups: set[LevelNames] = set() + self._activatable_groups: set[LevelNames] = set() + self._invisible_fields: set[LevelNames] = set() + self._display_name_map: dict[tuple[LevelNames, ParameterDisplayName], ParameterName] = {} + self._param_map: dict[LevelNames, dict] = {} + + self._analyze_schema() + + def __eq__(self: Self, value: object) -> bool: + if not isinstance(value, 
ConfigSchema):
+            return NotImplemented
+
+        this_name_type_mapping = self._retrieve_name_type_mapping()
+        other_name_type_mapping = value._retrieve_name_type_mapping()
+
+        return this_name_type_mapping == other_name_type_mapping
+
+    @property
+    def json_fields(self: Self) -> set[LevelNames]:
+        return self._jsons
+
+    def is_group(self: Self, parameter_name: LevelNames) -> bool:
+        return parameter_name in self._groups
+
+    def is_activatable_group(self: Self, parameter_name: LevelNames) -> bool:
+        return parameter_name in self._activatable_groups
+
+    def is_invisible(self: Self, parameter_name: LevelNames) -> bool:
+        return parameter_name in self._invisible_fields
+
+    def is_visible_parameter(self: Self, parameter_name: LevelNames) -> bool:
+        return parameter_name in self._param_map and not self.is_invisible(parameter_name)
+
+    def get_level_name(self: Self, group: LevelNames, display_name: ParameterDisplayName) -> ParameterName | None:
+        key = (group, display_name)
+        return self._display_name_map.get(key)
+
+    def get_default(self: Self, parameter_name: LevelNames) -> Any:  # noqa: ANN401
+        param_spec = self._param_map[parameter_name]
+        if not self.is_group(parameter_name):
+            return param_spec.get("default", None)
+
+        return {child_name: self.get_default((*parameter_name, child_name)) for child_name in param_spec["properties"]}
+
+    def _analyze_schema(self: Self) -> None:
+        for level_names, param_spec in self._iterate_parameters(object_schema=self._raw):
+            if is_group_v2(param_spec):
+                self._groups.add(level_names)
+
+                if is_activatable_v2(param_spec):
+                    self._activatable_groups.add(level_names)
+
+            elif is_json_v2(param_spec):
+                self._jsons.add(level_names)
+
+            if param_spec.get("adcmMeta", {}).get("isInvisible"):
+                self._invisible_fields.add(level_names)
+
+            *group, own_level_name = level_names
+            display_name = param_spec["title"]
+            self._display_name_map[tuple(group), display_name] = own_level_name
+            self._param_map[level_names] = param_spec
+
+    def _retrieve_name_type_mapping(self: Self) -> dict[LevelNames, str]:
+        return {
+            level_names: param_spec.get("type", "enum")
+            for level_names, param_spec in self._iterate_parameters(object_schema=self._raw)
+        }
+
+    def _iterate_parameters(self: Self, object_schema: dict) -> Iterable[tuple[LevelNames, dict]]:
+        for level_name, optional_attrs in object_schema["properties"].items():
+            attributes = self._unwrap_optional(optional_attrs)
+
+            yield (level_name,), attributes
+
+            if is_group_v2(attributes):
+                for inner_level, inner_optional_attrs in self._iterate_parameters(attributes):
+                    inner_attributes = self._unwrap_optional(inner_optional_attrs)
+                    yield (level_name, *inner_level), inner_attributes
+
+    def _unwrap_optional(self: Self, attributes: dict) -> dict:
+        if "oneOf" not in attributes:
+            return attributes
+
+        # naive search that may fail in many cases;
+        # more precise work with the spec would require encapsulation in a separate handler class
+        return next(entry for entry in attributes["oneOf"] if entry.get("type") != "null")
+
+
+def is_group_v2(attributes: dict) -> bool:
+    return attributes.get("type") == "object" and attributes.get("additionalProperties") is False
+
+
+def is_activatable_v2(attributes: dict) -> bool:
+    return (attributes["adcmMeta"].get("activation") or {}).get("isAllowChange", False)
+
+
+def is_json_v2(attributes: dict) -> bool:
+    return attributes.get("format") == "json"
+
+
+class LocalConfigs(NamedTuple):
+    initial: ConfigData
+    changed: ConfigData
+
+
+class ConfigRefreshStrategy(Protocol):
+    def __call__(self: Self, local:
LocalConfigs, remote: ConfigData, schema: ConfigSchema) -> ConfigData: + """ + `remote` may be changed according to strategy, so it shouldn't be "read-only"/"initial" + """ + ... diff --git a/adcm_aio_client/core/errors.py b/adcm_aio_client/core/errors.py new file mode 100644 index 00000000..7845ab91 --- /dev/null +++ b/adcm_aio_client/core/errors.py @@ -0,0 +1,171 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class ADCMClientError(Exception): + pass + + +class WaitTimeoutError(ADCMClientError): + pass + + +# Session + + +class ClientInitError(ADCMClientError): + pass + + +# Version + + +class NotSupportedVersionError(ADCMClientError): + pass + + +# Requester + + +class RequesterError(ADCMClientError): + pass + + +class NoCredentialsError(RequesterError): + pass + + +class AuthenticationError(RequesterError): + pass + + +class LoginError(RequesterError): + pass + + +class LogoutError(RequesterError): + pass + + +class RetryRequestError(RequesterError): + pass + + +class ResponseDataConversionError(RequesterError): + pass + + +class UnknownError(RequesterError): + pass + + +class BadRequestError(UnknownError): + pass + + +class UnauthorizedError(UnknownError): + pass + + +class PermissionDeniedError(UnknownError): + pass + + +class NotFoundError(UnknownError): + pass + + +class ConflictError(UnknownError): + pass + + +class ServerError(UnknownError): + pass + + +# Objects + + +class AccessorError(ADCMClientError): + pass + + +class MultipleObjectsReturnedError(AccessorError): + pass + + +class ObjectDoesNotExistError(AccessorError): + pass + + +class ObjectAlreadyExistsError(AccessorError): # TODO: add tests + pass + + +class OperationError(AccessorError): + pass + + +class HostNotInClusterError(ADCMClientError): ... + + +# Config + + +class ConfigError(ADCMClientError): ... + + +class ConfigComparisonError(ConfigError): ... + + +class ConfigNoParameterError(ConfigError): ... + + +# Action + + +class NoMappingInActionError(ADCMClientError): ... + + +class NoConfigInActionError(ADCMClientError): ... + + +# Filtering + + +class FilterError(ADCMClientError): ... + + +class InvalidFilterError(FilterError): ... + + +# Operation-related + + +class HostConflictError(ADCMClientError): + pass + + +class ObjectBlockedError(ADCMClientError): + pass + + +class InvalidMappingError(ADCMClientError): + pass + + +class InvalidConfigError(ADCMClientError): + pass + + +class ObjectUpdateError(ADCMClientError): + pass diff --git a/adcm_aio_client/core/filters.py b/adcm_aio_client/core/filters.py new file mode 100644 index 00000000..178ef301 --- /dev/null +++ b/adcm_aio_client/core/filters.py @@ -0,0 +1,162 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import deque +from dataclasses import dataclass +from typing import Generator, Iterable, Self + +from adcm_aio_client.core.errors import InvalidFilterError +from adcm_aio_client.core.objects._base import InteractiveObject +from adcm_aio_client.core.types import QueryParameters + +# Filters +EQUAL_OPERATIONS = frozenset(("eq", "ieq")) +MULTI_OPERATIONS = frozenset(("in", "iin", "exclude", "iexclude")) + + +COMMON_OPERATIONS = frozenset(("eq", "ne", "in", "exclude")) +STATUS_OPERATIONS = frozenset((*COMMON_OPERATIONS, *tuple(f"i{op}" for op in COMMON_OPERATIONS))) +ALL_OPERATIONS = frozenset(("contains", "icontains", *STATUS_OPERATIONS)) + +type FilterSingleValue = str | int | InteractiveObject +type FilterValue = FilterSingleValue | Iterable[FilterSingleValue] +type SimplifiedValue = str | int | tuple[str | int, ...] + + +@dataclass(slots=True) +class Filter: + attr: str + op: str + value: FilterValue + + +@dataclass(slots=True, frozen=True) +class FilterBy: + attr: str + operations: set[str] | frozenset[str] | tuple[str, ...] + single_input: type + + +class Filtering: + def __init__(self: Self, *allowed: FilterBy) -> None: + self._allowed = {entry.attr: entry for entry in allowed} + + def inline_filters_to_query(self: Self, filters: dict[str, FilterValue]) -> QueryParameters: + converted_filters = deque() + + for inline_filter, value in filters.items(): + try: + attr, op = inline_filter.rsplit("__", maxsplit=1) + except ValueError: + message = ( + f"Invalid inline filter format: {inline_filter}. " + "Attribute and operation should be joined with `__` for inline filters. " + f"Maybe you've meant `{inline_filter}__eq={value}`" + ) + raise InvalidFilterError(message) from None + + filter_ = Filter(attr=attr, op=op, value=value) + converted_filters.append(filter_) + + return self.to_query(filters=converted_filters) + + def to_query(self: Self, filters: Iterable[Filter]) -> QueryParameters: + query = {} + + for filter_ in filters: + # make value persistent + if isinstance(filter_.value, Generator): + filter_.value = tuple(filter_.value) + + self._check_allowed(filter_) + + name = self._attribute_name_to_camel_case(name=filter_.attr) + simplified_value = self._simplify_value(value=filter_.value) + self._check_no_operation_value_conflict(operation=filter_.op, value=simplified_value) + operation = filter_.op + value = self._prepare_query_param_value(value=simplified_value) + + query[f"{name}__{operation}"] = value + + return query + + def _check_allowed(self: Self, filter_: Filter) -> None: + allowed_filter = self._allowed.get(filter_.attr) + if not allowed_filter: + message = f"Filter by {filter_.attr} is not allowed. Allowed: {', '.join(self._allowed)}" + raise InvalidFilterError(message) + + if filter_.op not in allowed_filter.operations: + message = f"Operation {filter_.op} is not allowed. 
Allowed: {', '.join(sorted(allowed_filter.operations))}" + raise InvalidFilterError(message) + + expected_type = allowed_filter.single_input + if isinstance(filter_.value, Iterable): + if not all(isinstance(entry, expected_type) for entry in filter_.value): + message = f"At least one entry is not {expected_type}: {filter_.value}" + raise InvalidFilterError(message) + else: + if not isinstance(filter_.value, expected_type): + message = f"Value {filter_.value} is not {expected_type}" + raise InvalidFilterError(message) + + def _attribute_name_to_camel_case(self: Self, name: str) -> str: + first, *rest = name.split("_") + return f"{first}{''.join(map(str.capitalize, rest))}" + + def _simplify_value(self: Self, value: FilterValue) -> SimplifiedValue: + if isinstance(value, (str, int)): + return value + + if isinstance(value, InteractiveObject): + return value.id + + simplified_collection = deque() + + for entry in value: + if isinstance(entry, (str, int)): + simplified_collection.append(entry) + elif isinstance(entry, InteractiveObject): + simplified_collection.append(entry.id) + else: + message = f"Failed to simplify: {entry}" + raise TypeError(message) + + return tuple(simplified_collection) + + def _check_no_operation_value_conflict(self: Self, operation: str, value: SimplifiedValue) -> None: + is_collection = isinstance(value, tuple) + + if operation in MULTI_OPERATIONS: + if not is_collection: + message = f"Multiple values expected for {operation}" + raise InvalidFilterError(message) + + if not value: + message = "Collection for filter shouldn't be empty" + raise InvalidFilterError(message) + + else: + if is_collection: + message = f"Only one value is expected for {operation}" + raise InvalidFilterError(message) + + def _prepare_query_param_value(self: Self, value: SimplifiedValue) -> str: + if isinstance(value, tuple): + return ",".join(map(str, value)) + + return str(value) + + +FilterByName = FilterBy("name", ALL_OPERATIONS, str) +FilterByDisplayName = FilterBy("display_name", ALL_OPERATIONS, str) +FilterByStatus = FilterBy("status", STATUS_OPERATIONS, str) diff --git a/adcm_aio_client/core/host_groups/__init__.py b/adcm_aio_client/core/host_groups/__init__.py new file mode 100644 index 00000000..90174a91 --- /dev/null +++ b/adcm_aio_client/core/host_groups/__init__.py @@ -0,0 +1,4 @@ +from adcm_aio_client.core.host_groups.action_group import WithActionHostGroups +from adcm_aio_client.core.host_groups.config_group import WithConfigHostGroups + +__all__ = ["WithActionHostGroups", "WithConfigHostGroups"] diff --git a/adcm_aio_client/core/host_groups/_common.py b/adcm_aio_client/core/host_groups/_common.py new file mode 100644 index 00000000..7f804e47 --- /dev/null +++ b/adcm_aio_client/core/host_groups/_common.py @@ -0,0 +1,126 @@ +from functools import partial +from typing import TYPE_CHECKING, Any, Iterable, Self, Union + +from adcm_aio_client.core.errors import ConflictError, HostConflictError, ObjectAlreadyExistsError +from adcm_aio_client.core.filters import Filter +from adcm_aio_client.core.objects._accessors import ( + DefaultQueryParams as AccessorFilter, +) +from adcm_aio_client.core.objects._accessors import ( + PaginatedAccessor, + PaginatedChildAccessor, + filters_to_inline, +) +from adcm_aio_client.core.types import Endpoint, HostID, QueryParameters, Requester, RequesterResponse +from adcm_aio_client.core.utils import safe_gather + +if TYPE_CHECKING: + from adcm_aio_client.core.host_groups.action_group import ActionHostGroup + from 
adcm_aio_client.core.host_groups.config_group import ConfigHostGroup + from adcm_aio_client.core.objects.cm import Cluster, Component, Host, HostProvider, Service + + +class HostsInHostGroupNode(PaginatedAccessor["Host"]): + group_type: str + + def __new__(cls: type[Self], path: Endpoint, requester: Requester, accessor_filter: AccessorFilter = None) -> Self: + _ = path, requester, accessor_filter + if not hasattr(cls, "class_type"): + from adcm_aio_client.core.objects.cm import Host, HostsAccessor + + cls.class_type = Host + cls.filtering = HostsAccessor.filtering + + return super().__new__(cls) + + async def add(self: Self, host: Union["Host", Iterable["Host"], Filter]) -> None: + hosts = await self._get_hosts_from_args(host=host) + await self._add_hosts_to_group(h.id for h in hosts) + + async def remove(self: Self, host: Union["Host", Iterable["Host"], Filter]) -> None: + hosts = await self._get_hosts_from_args(host=host) + await self._remove_hosts_from_group(h.id for h in hosts) + + async def set(self: Self, host: Union["Host", Iterable["Host"], Filter]) -> None: + hosts = await self._get_hosts_from_args(host=host) + in_group_ids = {host["id"] for host in (await super()._request_endpoint(query={})).as_list()} + + to_remove_ids = {host_id for host_id in in_group_ids if host_id not in (host.id for host in hosts)} + to_add_ids = {host.id for host in hosts if host.id not in in_group_ids} + + if to_remove_ids: + await self._remove_hosts_from_group(ids=to_remove_ids) + if to_add_ids: + await self._add_hosts_to_group(ids=to_add_ids) + + async def _add_hosts_to_group(self: Self, ids: Iterable[HostID]) -> None: + add_by_id = partial(self._requester.post, *self._path) + try: + if error := await safe_gather( + coros=(add_by_id(data={"hostId": id_}) for id_ in ids), + msg=f"Some hosts can't be added to {self.group_type} host group", + ): + raise error + except* ConflictError as conflict_err_group: + host_conflict_msgs = {"already a member of this group", "already is a member of another group"} + if target_gr := conflict_err_group.subgroup(lambda e: any(msg in str(e) for msg in host_conflict_msgs)): + raise HostConflictError(*target_gr.exceptions[0].args) from None + raise + + async def _remove_hosts_from_group(self: Self, ids: Iterable[HostID]) -> None: + delete_by_id = partial(self._requester.delete, *self._path) + delete_coros = map(delete_by_id, ids) + error = await safe_gather( + coros=delete_coros, + msg=f"Some hosts can't be removed from {self.group_type} host group", + ) + + if error is not None: + raise error + + async def _get_hosts_from_args(self: Self, host: Union["Host", Iterable["Host"], Filter]) -> list["Host"]: + if isinstance(host, Filter): + inline_filters = filters_to_inline(host) + return await self.filter(**inline_filters) + + return list(host) if isinstance(host, Iterable) else [host] + + async def _request_endpoint( + self: Self, query: QueryParameters, filters: dict[str, Any] | None = None + ) -> RequesterResponse: + """HostGroup/hosts response have too little information to construct Host""" + + data = (await super()._request_endpoint(query, filters)).as_list() + ids = ",".join(str(host["id"]) for host in data) + query = {"id__in": ids} if ids else {"id__in": "-1"} # non-existent id to fetch 0 hosts + + return await self._requester.get("hosts", query=query) + + +class HostGroupNode[ + Parent: Cluster | Service | Component | HostProvider, + Child: ConfigHostGroup | ActionHostGroup, +](PaginatedChildAccessor[Parent, Child]): + async def create( # TODO: can create HG with 
subset of `hosts` if adding some of them leads to an error + self: Self, name: str, description: str = "", hosts: list["Host"] | None = None + ) -> Child: + try: + response = await self._requester.post(*self._path, data={"name": name, "description": description}) + except ConflictError as e: + if "already exists" in str(e): + raise ObjectAlreadyExistsError(*e.args) from None + raise + host_group = self.class_type(parent=self._parent, data=response.as_dict()) + + if not hosts: + return host_group + + path = *host_group.get_own_path(), "hosts" + error = await safe_gather( + coros=(self._requester.post(*path, data={"hostId": host.id}) for host in hosts), + msg=f"Some hosts can't be added to {host_group}", + ) + if error is not None: + raise error + + return host_group diff --git a/adcm_aio_client/core/host_groups/action_group.py b/adcm_aio_client/core/host_groups/action_group.py new file mode 100644 index 00000000..d6e6f17e --- /dev/null +++ b/adcm_aio_client/core/host_groups/action_group.py @@ -0,0 +1,46 @@ +from functools import cached_property +from typing import TYPE_CHECKING, Self, Union + +from adcm_aio_client.core.filters import FilterByName, Filtering +from adcm_aio_client.core.host_groups._common import HostGroupNode, HostsInHostGroupNode +from adcm_aio_client.core.objects._base import InteractiveChildObject +from adcm_aio_client.core.objects._common import Deletable, WithActions +from adcm_aio_client.core.types import AwareOfOwnPath, WithProtectedRequester + +if TYPE_CHECKING: + from adcm_aio_client.core.objects.cm import Cluster, Component, Service + + +class ActionHostGroup(InteractiveChildObject, WithActions, Deletable): + PATH_PREFIX = "action-host-groups" + + @property + def name(self: Self) -> str: + return self._data["name"] + + @property + def description(self: Self) -> str: + return self._data["description"] + + @cached_property + def hosts(self: Self) -> "HostsInActionHostGroupNode": + return HostsInActionHostGroupNode(path=(*self.get_own_path(), "hosts"), requester=self._requester) + + +class ActionHostGroupNode(HostGroupNode[Union["Cluster", "Service", "Component"], ActionHostGroup]): + class_type = ActionHostGroup + filtering = Filtering(FilterByName) + + +class HostsInActionHostGroupNode(HostsInHostGroupNode): + group_type = "action" + + +class WithActionHostGroups(WithProtectedRequester, AwareOfOwnPath): + @cached_property + def action_host_groups(self: Self) -> ActionHostGroupNode: + return ActionHostGroupNode( + parent=self, # pyright: ignore[reportArgumentType] easier to ignore than fix this typing + path=(*self.get_own_path(), "action-host-groups"), + requester=self._requester, + ) diff --git a/adcm_aio_client/core/host_groups/config_group.py b/adcm_aio_client/core/host_groups/config_group.py new file mode 100644 index 00000000..53ea4867 --- /dev/null +++ b/adcm_aio_client/core/host_groups/config_group.py @@ -0,0 +1,47 @@ +from functools import cached_property +from typing import TYPE_CHECKING, Self, Union + +from adcm_aio_client.core.filters import FilterByName, Filtering +from adcm_aio_client.core.host_groups._common import HostGroupNode, HostsInHostGroupNode +from adcm_aio_client.core.objects._base import InteractiveChildObject +from adcm_aio_client.core.objects._common import Deletable, WithConfig +from adcm_aio_client.core.types import AwareOfOwnPath, WithProtectedRequester + +if TYPE_CHECKING: + from adcm_aio_client.core.objects.cm import Cluster, Component, Service + + +class ConfigHostGroup(InteractiveChildObject, Deletable, WithConfig): + PATH_PREFIX = 
"config-groups" + + @property + def name(self: Self) -> str: + return self._data["name"] + + @property + def description(self: Self) -> str: + return self._data["description"] + + @cached_property + def hosts(self: Self) -> "HostsInConfigHostGroupNode": + return HostsInConfigHostGroupNode(path=(*self.get_own_path(), "hosts"), requester=self._requester) + + +class ConfigHostGroupNode(HostGroupNode[Union["Cluster", "Service", "Component"], ConfigHostGroup]): + class_type = ConfigHostGroup + filtering = Filtering(FilterByName) + # TODO: create() with `config` arg + + +class HostsInConfigHostGroupNode(HostsInHostGroupNode): + group_type = "config" + + +class WithConfigHostGroups(WithProtectedRequester, AwareOfOwnPath): + @cached_property + def config_host_groups(self: Self) -> ConfigHostGroupNode: + return ConfigHostGroupNode( + parent=self, # pyright: ignore[reportArgumentType] easier to ignore than fix this typing + path=(*self.get_own_path(), "config-groups"), + requester=self._requester, + ) diff --git a/adcm_aio_client/core/mapping/__init__.py b/adcm_aio_client/core/mapping/__init__.py new file mode 100644 index 00000000..8f707b69 --- /dev/null +++ b/adcm_aio_client/core/mapping/__init__.py @@ -0,0 +1,3 @@ +from adcm_aio_client.core.mapping._objects import ActionMapping, ClusterMapping + +__all__ = ["ActionMapping", "ClusterMapping"] diff --git a/adcm_aio_client/core/mapping/_objects.py b/adcm_aio_client/core/mapping/_objects.py new file mode 100644 index 00000000..ef110913 --- /dev/null +++ b/adcm_aio_client/core/mapping/_objects.py @@ -0,0 +1,226 @@ +from __future__ import annotations + +from collections.abc import Generator +from copy import copy +from functools import cached_property +from typing import TYPE_CHECKING, Any, Callable, Coroutine, Iterable, Self +import asyncio + +from adcm_aio_client.core.errors import BadRequestError, ConflictError, InvalidMappingError +from adcm_aio_client.core.filters import Filter, FilterByDisplayName, FilterByName, FilterByStatus, Filtering +from adcm_aio_client.core.mapping.refresh import apply_local_changes, apply_remote_changes +from adcm_aio_client.core.mapping.types import LocalMappings, MappingEntry, MappingPair, MappingRefreshStrategy +from adcm_aio_client.core.objects._accessors import NonPaginatedAccessor, filters_to_inline +from adcm_aio_client.core.types import ComponentID, HostID, Requester + +if TYPE_CHECKING: + from adcm_aio_client.core.objects.cm import Cluster, Component, Host, HostsAccessor, Service + + +class ComponentsMappingNode(NonPaginatedAccessor["Component"]): + filtering = Filtering(FilterByName, FilterByDisplayName, FilterByStatus) + + def __new__(cls: type[Self], cluster: Cluster, requester: Requester) -> Self: + _ = cluster, requester + + if not hasattr(cls, "class_type"): + from adcm_aio_client.core.objects.cm import Component + + cls.class_type = Component + + return super().__new__(cls) + + def __init__(self: Self, cluster: Cluster, requester: Requester) -> None: + path = (*cluster.get_own_path(), "mapping", "components") + super().__init__(path=path, requester=requester, default_query=None) + self._cluster = cluster + + def _create_object(self: Self, data: dict[str, Any]) -> Component: + from adcm_aio_client.core.objects.cm import Service + + # service data here should be enough, + # when not, we should use lazy objects + # or request services (means it should be async) + caches + service = Service(parent=self._cluster, data=data["service"]) + return self.class_type(parent=service, data=data) + + +class ActionMapping: 
+ def __init__( + self: Self, owner: Cluster | Service | Component | Host, cluster: Cluster, entries: Iterable[MappingPair] + ) -> None: + self._owner = owner + self._cluster = cluster + self._requester = self._owner.requester + + self._components: dict[ComponentID, Component] = {} + self._hosts: dict[HostID, Host] = {} + + self._initial: set[MappingEntry] = set() + + for component, host in entries: + self._components[component.id] = component + self._hosts[host.id] = host + self._initial.add(MappingEntry(host_id=host.id, component_id=component.id)) + + self._current: set[MappingEntry] = copy(self._initial) + + def empty(self: Self) -> Self: + self._current.clear() + return self + + def all(self: Self) -> list[MappingPair]: + return list(self.iter()) + + def iter(self: Self) -> Generator[MappingPair, None, None]: + for entry in self._current: + yield self._components[entry.component_id], self._hosts[entry.host_id] + + async def add(self: Self, component: Component | Iterable[Component], host: Host | Iterable[Host] | Filter) -> Self: + components, hosts = await self._resolve_components_and_hosts(component=component, host=host) + self._cache_components_and_hosts(components, hosts) + + to_add = self._to_entries(components=components, hosts=hosts) + + self._current |= to_add + + return self + + async def remove( + self: Self, component: Component | Iterable[Component], host: Host | Iterable[Host] | Filter + ) -> Self: + components, hosts = await self._resolve_components_and_hosts(component=component, host=host) + self._cache_components_and_hosts(components, hosts) + + to_remove = self._to_entries(components=components, hosts=hosts) + + self._current -= to_remove + + return self + + @cached_property + def components(self: Self) -> ComponentsMappingNode: + return ComponentsMappingNode(cluster=self._cluster, requester=self._owner.requester) + + @cached_property + def hosts(self: Self) -> HostsAccessor: + from adcm_aio_client.core.objects.cm import HostsAccessor + + cluster_hosts_path = (*self._cluster.get_own_path(), "hosts") + + return HostsAccessor(path=cluster_hosts_path, requester=self._owner.requester) + + async def _resolve_components_and_hosts( + self: Self, component: Component | Iterable[Component], host: Host | Iterable[Host] | Filter + ) -> tuple[Iterable[Component], Iterable[Host]]: + from adcm_aio_client.core.objects.cm import Component, Host + + if isinstance(component, Component): + component = (component,) + + if isinstance(host, Host): + host = (host,) + elif isinstance(host, Filter): + inline_filters = filters_to_inline(host) + host = await self.hosts.filter(**inline_filters) + + return component, host + + def _cache_components_and_hosts(self: Self, components: Iterable[Component], hosts: Iterable[Host]) -> None: + self._components |= {component.id: component for component in components} + self._hosts |= {host.id: host for host in hosts} + + def _to_entries(self: Self, components: Iterable[Component], hosts: Iterable[Host]) -> set[MappingEntry]: + return {MappingEntry(host_id=host.id, component_id=component.id) for host in hosts for component in components} + + def _to_payload(self: Self) -> list[dict]: + return [{"componentId": entry.component_id, "hostId": entry.host_id} for entry in self._current] + + +class ClusterMapping(ActionMapping): + def __init__(self: Self, owner: Cluster, entries: Iterable[MappingPair]) -> None: + super().__init__(owner=owner, cluster=owner, entries=entries) + + @classmethod + async def for_cluster(cls: type[Self], owner: Cluster) -> Self: + instance 
= cls(owner=owner, entries=()) + await instance.refresh(strategy=apply_remote_changes) + return instance + + async def save(self: Self) -> Self: + data = self._to_payload() + + try: + await self._requester.post(*self._cluster.get_own_path(), "mapping", data=data) + except ConflictError as e: + # TODO: may be incomplete. Add tests for errors + conflict_msgs = { + "has unsatisfied constraint", + "No required service", + "hosts in maintenance mode", + "COMPONENT_CONSTRAINT_ERROR", + } + if any(msg in str(e) for msg in conflict_msgs): + raise InvalidMappingError(*e.args) from None + raise + except BadRequestError as e: + bad_request_msgs = {"Mapping entries duplicates found"} + if any((msg in str(e)) for msg in bad_request_msgs): + raise InvalidMappingError(*e.args) from None + raise + + self._initial = copy(self._current) + + return self + + async def refresh(self: Self, strategy: MappingRefreshStrategy = apply_local_changes) -> Self: + response = await self._requester.get(*self._cluster.get_own_path(), "mapping") + remote = { + MappingEntry(component_id=entry["componentId"], host_id=entry["hostId"]) for entry in response.as_list() + } + + local = LocalMappings(initial=self._initial, current=self._current) + merged_mapping = strategy(local=local, remote=remote) + + self._initial = merged_mapping + self._current = copy(merged_mapping) + + await self._fill_missing_objects() + + return self + + async def _fill_missing_objects(self: Self) -> None: + missing_hosts = set() + missing_components = set() + + for entry in self._current | self._initial: + if entry.host_id not in self._hosts: + missing_hosts.add(entry.host_id) + + if entry.component_id not in self._components: + missing_components.add(entry.component_id) + + hosts_task = self._run_task_if_objects_are_missing(method=self.hosts.list, missing_objects=missing_hosts) + + components_task = self._run_task_if_objects_are_missing( + method=self.components.list, missing_objects=missing_components + ) + + if hosts_task is not None: + self._hosts |= {host.id: host for host in await hosts_task} + + if components_task is not None: + self._components |= {component.id: component for component in await components_task} + + def _run_task_if_objects_are_missing( + self: Self, method: Callable[[dict], Coroutine], missing_objects: set[int] + ) -> asyncio.Task | None: + if not missing_objects: + return None + + ids_str = ",".join(map(str, missing_objects)) + # limit in case there are more than 1 page of objects + records_amount = len(missing_objects) + query = {"id__in": ids_str, "limit": records_amount} + + return asyncio.create_task(method(query)) diff --git a/adcm_aio_client/core/mapping/refresh.py b/adcm_aio_client/core/mapping/refresh.py new file mode 100644 index 00000000..3aa2c1f5 --- /dev/null +++ b/adcm_aio_client/core/mapping/refresh.py @@ -0,0 +1,39 @@ +from adcm_aio_client.core.mapping.types import LocalMappings, MappingData + +type Added = MappingData +type Removed = MappingData + + +def apply_local_changes(local: LocalMappings, remote: MappingData) -> MappingData: + if local.initial == remote: + return local.current + + local_added, local_removed = _find_difference(previous=local.initial, current=local.current) + + remote |= local_added + remote -= local_removed + + return remote + + +def apply_remote_changes(local: LocalMappings, remote: MappingData) -> MappingData: + local_added, local_removed = _find_difference(previous=local.initial, current=local.current) + + remote_added, remote_removed = _find_difference(previous=local.initial, 
current=remote) + + # `to_add` feels impossible, because remote can't remove what we haven't added, + # yet it's the general rule for this strategy, so we'll keep it for the time being + to_add = local_added - remote_removed + to_remove = local_removed - remote_added + + remote |= to_add + remote -= to_remove + + return remote + + +def _find_difference(previous: MappingData, current: MappingData) -> tuple[Added, Removed]: + added = current - previous + removed = previous - current + + return added, removed diff --git a/adcm_aio_client/core/mapping/types.py b/adcm_aio_client/core/mapping/types.py new file mode 100644 index 00000000..3e7bc67b --- /dev/null +++ b/adcm_aio_client/core/mapping/types.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, NamedTuple, Protocol + +from adcm_aio_client.core.types import ComponentID, HostID + +if TYPE_CHECKING: + from adcm_aio_client.core.objects.cm import Component, Host + + +type MappingPair = tuple[Component, Host] + + +class MappingEntry(NamedTuple): + host_id: HostID + component_id: ComponentID + + +type MappingData = set[MappingEntry] + + +class LocalMappings(NamedTuple): + initial: MappingData + current: MappingData + + +class MappingRefreshStrategy(Protocol): + def __call__(self, local: LocalMappings, remote: MappingData) -> MappingData: ... # noqa: ANN101 diff --git a/adcm_aio_client/core/objects/__init__.py b/adcm_aio_client/core/objects/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/adcm_aio_client/core/objects/_accessors.py b/adcm_aio_client/core/objects/_accessors.py new file mode 100644 index 00000000..dc161c0e --- /dev/null +++ b/adcm_aio_client/core/objects/_accessors.py @@ -0,0 +1,136 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from contextlib import suppress +from typing import Any, AsyncGenerator, Self + +from adcm_aio_client.core.errors import MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.filters import Filter, Filtering, FilterValue +from adcm_aio_client.core.objects._base import InteractiveChildObject, InteractiveObject +from adcm_aio_client.core.types import Endpoint, QueryParameters, Requester, RequesterResponse + +# filter for narrowing response objects +type DefaultQueryParams = QueryParameters | None + + +def filters_to_inline(*filters: Filter) -> dict: + return {f"{f.attr}__{f.op}": f.value for f in filters} + + +class Accessor[ReturnObject: InteractiveObject](ABC): + class_type: type[ReturnObject] + filtering: Filtering + + def __init__(self: Self, path: Endpoint, requester: Requester, default_query: DefaultQueryParams = None) -> None: + self._path = path + self._requester = requester + self._default_query = default_query or {} + + @abstractmethod + async def iter(self: Self, **filters: FilterValue) -> AsyncGenerator[ReturnObject, None]: ... + + @abstractmethod + def _extract_results_from_response(self: Self, response: RequesterResponse) -> list[dict]: ...
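The strategies in `mapping/refresh.py` above reconcile locally staged mapping edits with state that drifted on the server in the meantime. A worked example using only the types from this diff (the IDs are made up; note that the strategy mutates the `remote` set in place):

```python
# Worked example of the refresh strategy from adcm_aio_client/core/mapping/refresh.py above.
from adcm_aio_client.core.mapping.refresh import apply_local_changes
from adcm_aio_client.core.mapping.types import LocalMappings, MappingEntry

initial = {MappingEntry(host_id=1, component_id=10)}
current = {MappingEntry(host_id=2, component_id=10)}  # locally: host 1 removed, host 2 added
# meanwhile the server kept host 1 and gained host 3
remote = {MappingEntry(host_id=1, component_id=10), MappingEntry(host_id=3, component_id=10)}

merged = apply_local_changes(local=LocalMappings(initial=initial, current=current), remote=remote)

# local edits are replayed on top of the drifted remote state
assert merged == {MappingEntry(host_id=2, component_id=10), MappingEntry(host_id=3, component_id=10)}
```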
+ + async def get(self: Self, **filters: FilterValue) -> ReturnObject: + response = await self._request_endpoint(query={"offset": 0, "limit": 2}, filters=filters) + results = self._extract_results_from_response(response=response) + + if not results: + raise ObjectDoesNotExistError("No objects found with the given filter.") + + if len(results) > 1: + raise MultipleObjectsReturnedError("More than one object found.") + + return self._create_object(results[0]) + + async def get_or_none(self: Self, **filters: FilterValue) -> ReturnObject | None: + with suppress(ObjectDoesNotExistError): + return await self.get(**filters) + + return None + + async def all(self: Self) -> list[ReturnObject]: + return await self.filter() + + async def filter(self: Self, **filters: FilterValue) -> list[ReturnObject]: + return [i async for i in self.iter(**filters)] + + async def list(self: Self, query: dict | None = None) -> list[ReturnObject]: + response = await self._request_endpoint(query=query or {}) + results = self._extract_results_from_response(response) + return [self._create_object(obj) for obj in results] + + async def _request_endpoint( + self: Self, query: QueryParameters, filters: dict[str, Any] | None = None + ) -> RequesterResponse: + filters_query = self.filtering.inline_filters_to_query(filters=filters or {}) + + final_query = filters_query | query | self._default_query + + return await self._requester.get(*self._path, query=final_query) + + def _create_object(self: Self, data: dict[str, Any]) -> ReturnObject: + return self.class_type(requester=self._requester, data=data) + + +class PaginatedAccessor[ReturnObject: InteractiveObject](Accessor[ReturnObject]): + async def iter(self: Self, **filters: FilterValue) -> AsyncGenerator[ReturnObject, None]: + start, step = 0, 10 + while True: + response = await self._request_endpoint(query={"offset": start, "limit": step}, filters=filters) + results = self._extract_results_from_response(response=response) + + if not results: + return + + for record in results: + yield self._create_object(record) + + start += step + + def _extract_results_from_response(self: Self, response: RequesterResponse) -> list[dict]: + return response.as_dict()["results"] + + +class PaginatedChildAccessor[Parent, Child: InteractiveChildObject](PaginatedAccessor[Child]): + def __init__( + self: Self, parent: Parent, path: Endpoint, requester: Requester, default_query: DefaultQueryParams = None + ) -> None: + super().__init__(path, requester, default_query) + self._parent = parent + + def _create_object(self: Self, data: dict[str, Any]) -> Child: + return self.class_type(parent=self._parent, data=data) + + +class NonPaginatedAccessor[Child: InteractiveObject](Accessor[Child]): + async def iter(self: Self, **filters: FilterValue) -> AsyncGenerator[Child, None]: + response = await self._request_endpoint(query={}, filters=filters) + results = self._extract_results_from_response(response=response) + for record in results: + yield self._create_object(record) + + def _extract_results_from_response(self: Self, response: RequesterResponse) -> list[dict]: + return response.as_list() + + +class NonPaginatedChildAccessor[Parent, Child: InteractiveChildObject](NonPaginatedAccessor[Child]): + def __init__( + self: Self, parent: Parent, path: Endpoint, requester: Requester, default_query: DefaultQueryParams = None + ) -> None: + super().__init__(path, requester, default_query) + self._parent = parent + + def _create_object(self: Self, data: dict[str, Any]) -> Child: + return 
self.class_type(parent=self._parent, data=data) diff --git a/adcm_aio_client/core/objects/_base.py b/adcm_aio_client/core/objects/_base.py new file mode 100644 index 00000000..86df2725 --- /dev/null +++ b/adcm_aio_client/core/objects/_base.py @@ -0,0 +1,142 @@ +from collections import deque +from contextlib import suppress +from functools import cached_property +from typing import Any, Self + +from asyncstdlib.functools import CachedProperty + +from adcm_aio_client.core.types import ( + AwareOfOwnPath, + Endpoint, + MaintenanceModeStatus, + Requester, + WithProtectedRequester, + WithRequesterProperty, +) + + +class InteractiveObject(WithProtectedRequester, WithRequesterProperty, AwareOfOwnPath): + PATH_PREFIX: str + _delete_on_refresh: deque[str] + + def __init_subclass__(cls: type[Self]) -> None: + super().__init_subclass__() + + # names of cached properties, so they can be deleted + cls._delete_on_refresh = deque() + for name in dir(cls): + # None is for declared, but unset values + attr = getattr(cls, name, None) + if isinstance(attr, (cached_property, CachedProperty)): + cls._delete_on_refresh.append(name) + + def __init__(self: Self, requester: Requester, data: dict[str, Any]) -> None: + self._requester = requester + self._data = data + + @property + def requester(self: Self) -> Requester: + return self._requester + + @cached_property + def id(self: Self) -> int: + # it's the default behavior, without id many things can't be done + return int(self._data["id"]) + + async def refresh(self: Self) -> Self: + self._data = await self._retrieve_data() + self._clear_cache() + + return self + + async def _retrieve_data(self: Self) -> dict: + response = await self._requester.get(*self.get_own_path()) + return response.as_dict() + + def _construct[Object: "InteractiveObject"](self: Self, what: type[Object], from_data: dict[str, Any]) -> Object: + return what(requester=self._requester, data=from_data) + + def _construct_child[Child: "InteractiveChildObject"]( + self: Self, what: type[Child], from_data: dict[str, Any] + ) -> Child: + return what(data=from_data, parent=self) + + def _clear_cache(self: Self) -> None: + for name in self._delete_on_refresh: + # Works for cached_property. 
Suppresses errors on deleting values not yet cached (absent in self.__dict__) + with suppress(AttributeError): + delattr(self, name) + + def __str__(self: Self) -> str: + return self._repr + + def __repr__(self: Self) -> str: + return self._repr + + @property + def _repr(self: Self) -> str: + name = getattr(self, "name", None) + name = f" {name}" if isinstance(name, str) else "" + return f"<{self.__class__.__name__} #{self.id}{name}>" + + +class RootInteractiveObject(InteractiveObject): + def get_own_path(self: Self) -> Endpoint: + return self._build_own_path(self.id) + + @classmethod + async def with_id(cls: type[Self], requester: Requester, object_id: int) -> Self: + object_path = cls._build_own_path(object_id) + response = await requester.get(*object_path) + return cls(requester=requester, data=response.as_dict()) + + @classmethod + def _build_own_path(cls: type[Self], object_id: int) -> Endpoint: + return cls.PATH_PREFIX, object_id + + +class InteractiveChildObject[Parent: InteractiveObject](InteractiveObject): + def __init__(self: Self, parent: Parent, data: dict[str, Any]) -> None: + super().__init__(requester=parent.requester, data=data) + self._parent = parent + + def get_own_path(self: Self) -> Endpoint: + return *self._parent.get_own_path(), self.PATH_PREFIX, self.id + + @classmethod + async def with_id(cls: type[Self], parent: Parent, object_id: int) -> Self: + object_path = (*parent.get_own_path(), cls.PATH_PREFIX, str(object_id)) + response = await parent.requester.get(*object_path) + return cls(parent=parent, data=response.as_dict()) + + +class MaintenanceMode: + def __init__( + self: Self, maintenance_mode_status: MaintenanceModeStatus, requester: Requester, path: Endpoint + ) -> None: + self._maintenance_mode_status = maintenance_mode_status + self._requester = requester + self._path = path + + def __repr__(self: Self) -> str: + return self._maintenance_mode_status + + def __str__(self: Self) -> str: + return self._maintenance_mode_status + + @property + def value(self: Self) -> str: + return self._maintenance_mode_status + + async def on(self: Self) -> None: + current_mm_status = await self._requester.post( + *self._path, "maintenance-mode", data={"maintenanceMode": MaintenanceModeStatus.ON} + ) + self._maintenance_mode_status = current_mm_status.as_dict()["maintenanceMode"] + + async def off(self: Self) -> None: + current_mm_status = await self._requester.post( + *self._path, "maintenance-mode", data={"maintenanceMode": MaintenanceModeStatus.OFF} + ) + self._maintenance_mode_status = current_mm_status.as_dict()["maintenanceMode"] diff --git a/adcm_aio_client/core/objects/_common.py b/adcm_aio_client/core/objects/_common.py new file mode 100644 index 00000000..0db16076 --- /dev/null +++ b/adcm_aio_client/core/objects/_common.py @@ -0,0 +1,65 @@ +from functools import cached_property +from typing import Self + +from asyncstdlib.functools import cached_property as async_cached_property # noqa: N813 + +from adcm_aio_client.core.actions import ActionsAccessor, UpgradeNode +from adcm_aio_client.core.config import ConfigHistoryNode, ObjectConfig +from adcm_aio_client.core.config._objects import ConfigOwner +from adcm_aio_client.core.objects._base import AwareOfOwnPath, MaintenanceMode, WithProtectedRequester +from adcm_aio_client.core.objects._imports import Imports + + +class Deletable(WithProtectedRequester, AwareOfOwnPath): + async def delete(self: Self) -> None: + await self._requester.delete(*self.get_own_path())
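The `_common.py` mixins are deliberately tiny: each one grafts a single capability (deletion, status, actions, config, …) onto any object that knows its requester and its own path. Concrete classes such as `Cluster` in `objects/cm.py` below simply stack them; a trimmed sketch of the pattern, where `Demo` and its `"demos"` endpoint are hypothetical:

```python
# Trimmed sketch of how the mixins compose; `Demo` and "demos" are hypothetical,
# the real instances of this pattern are Cluster/Service/Host in objects/cm.py below.
from typing import Self

from adcm_aio_client.core.objects._base import RootInteractiveObject
from adcm_aio_client.core.objects._common import Deletable, WithActions, WithStatus


class Demo(WithStatus, Deletable, WithActions, RootInteractiveObject):
    PATH_PREFIX = "demos"  # each object only declares its endpoint prefix

    @property
    def name(self: Self) -> str:
        return str(self._data["name"])
```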
+ + +class WithStatus(WithProtectedRequester, AwareOfOwnPath): + async def get_status(self: Self) -> str: + response = await self._requester.get(*self.get_own_path()) + return response.as_dict()["status"] + + +class WithActions(WithProtectedRequester, AwareOfOwnPath): + @cached_property + def actions(self: Self) -> ActionsAccessor: + # `WithActions` can actually be InteractiveObject, but it isn't required + # based on usages, so for now it's simply ignored + return ActionsAccessor(parent=self, path=(*self.get_own_path(), "actions"), requester=self._requester) # type: ignore[reportArgumentType] + + +class WithConfig(ConfigOwner): + # must be an async-aware cache: a plain `cached_property` would cache the coroutine + # object itself, which can only be awaited once + @async_cached_property + async def config(self: Self) -> ObjectConfig: + return await self.config_history.current() + + @cached_property + def config_history(self: Self) -> ConfigHistoryNode: + return ConfigHistoryNode(parent=self) + + +class WithUpgrades(WithProtectedRequester, AwareOfOwnPath): + @cached_property + def upgrades(self: Self) -> UpgradeNode: + return UpgradeNode(parent=self, path=(*self.get_own_path(), "upgrades"), requester=self._requester) + + +class WithMaintenanceMode(WithProtectedRequester, AwareOfOwnPath): + @async_cached_property + async def maintenance_mode(self: Self) -> MaintenanceMode: + maintenance_mode = MaintenanceMode(self._data["maintenanceMode"], self._requester, self.get_own_path()) # pyright: ignore[reportAttributeAccessIssue] + self._data["maintenanceMode"] = maintenance_mode.value # pyright: ignore[reportAttributeAccessIssue] + return maintenance_mode + + +class WithJobStatus(WithProtectedRequester, AwareOfOwnPath): + async def get_job_status(self: Self) -> str: + response = await self._requester.get(*self.get_own_path()) + return response.as_dict()["status"] + + +class WithImports(WithProtectedRequester, AwareOfOwnPath): + @async_cached_property + async def imports(self: Self) -> Imports: + return Imports(requester=self._requester, path=(*self.get_own_path(), "imports")) diff --git a/adcm_aio_client/core/objects/_imports.py b/adcm_aio_client/core/objects/_imports.py new file mode 100644 index 00000000..97a16821 --- /dev/null +++ b/adcm_aio_client/core/objects/_imports.py @@ -0,0 +1,47 @@ +from typing import TYPE_CHECKING, Collection, Iterable, Self, Union + +from adcm_aio_client.core.types import Endpoint, Requester + +if TYPE_CHECKING: + from adcm_aio_client.core.objects.cm import Cluster, Service + + +class Imports: + def __init__(self: Self, requester: Requester, path: Endpoint) -> None: + self._requester = requester + self._path = path + + async def _get_source_binds(self: Self) -> set[tuple[int, str]]: + response = await self._requester.get(*self._path) + data_binds = set() + + for import_data in response.as_dict()["results"]: + binds = import_data.get("binds", []) + for bind in binds: + bind_id = int(bind["source"]["id"]) + bind_type = bind["source"]["type"] + data_binds.add((bind_id, bind_type)) + + return data_binds + + def _create_post_data(self: Self, binds: Iterable[tuple[int, str]]) -> list[dict[str, dict[str, int | str]]]: + return [{"source": {"id": source[0], "type": source[1]}} for source in binds] + + def _sources_to_binds(self: Self, sources: Collection[Union["Cluster", "Service"]]) -> set[tuple[int, str]]: + return {(s.id, s.__class__.__name__.lower()) for s in sources} + + async def add(self: Self, sources: Collection[Union["Cluster", "Service"]]) -> None: + current_binds = await self._get_source_binds() + sources_binds = self._sources_to_binds(sources) + binds_to_set = current_binds.union(sources_binds) + await self._requester.post(*self._path, 
data=self._create_post_data(binds_to_set)) + + async def set(self: Self, sources: Collection[Union["Cluster", "Service"]]) -> None: + binds_to_set = self._sources_to_binds(sources) + await self._requester.post(*self._path, data=self._create_post_data(binds_to_set)) + + async def remove(self: Self, sources: Collection[Union["Cluster", "Service"]]) -> None: + current_binds = await self._get_source_binds() + sources_binds = self._sources_to_binds(sources) + binds_to_set = current_binds.difference(sources_binds) + await self._requester.post(*self._path, data=self._create_post_data(binds_to_set)) diff --git a/adcm_aio_client/core/objects/cm.py b/adcm_aio_client/core/objects/cm.py new file mode 100644 index 00000000..9d53b2ff --- /dev/null +++ b/adcm_aio_client/core/objects/cm.py @@ -0,0 +1,676 @@ +from collections import deque +from datetime import datetime, timedelta +from functools import cached_property +from pathlib import Path +from typing import Any, AsyncGenerator, Awaitable, Callable, Iterable, Literal, Self +import asyncio + +from asyncstdlib.functools import cached_property as async_cached_property # noqa: N813 + +from adcm_aio_client.core.actions._objects import Action +from adcm_aio_client.core.errors import ( + ConflictError, + HostConflictError, + InvalidFilterError, + NotFoundError, + ObjectAlreadyExistsError, + ObjectUpdateError, + UnknownError, + WaitTimeoutError, +) +from adcm_aio_client.core.filters import ( + ALL_OPERATIONS, + COMMON_OPERATIONS, + Filter, + FilterBy, + FilterByDisplayName, + FilterByName, + FilterByStatus, + Filtering, + FilterValue, +) +from adcm_aio_client.core.host_groups import WithActionHostGroups, WithConfigHostGroups +from adcm_aio_client.core.host_groups.action_group import ActionHostGroup +from adcm_aio_client.core.mapping import ClusterMapping +from adcm_aio_client.core.objects._accessors import ( + PaginatedAccessor, + PaginatedChildAccessor, + filters_to_inline, +) +from adcm_aio_client.core.objects._base import ( + InteractiveChildObject, + InteractiveObject, + RootInteractiveObject, +) +from adcm_aio_client.core.objects._common import ( + Deletable, + WithActions, + WithConfig, + WithImports, + WithMaintenanceMode, + WithStatus, + WithUpgrades, +) +from adcm_aio_client.core.requesters import BundleRetrieverInterface +from adcm_aio_client.core.types import ( + DEFAULT_JOB_TERMINAL_STATUSES, + Endpoint, + Requester, + URLStr, + WithProtectedRequester, +) +from adcm_aio_client.core.utils import safe_gather + + +class ADCM(InteractiveObject, WithActions, WithConfig): + def __init__(self: Self, requester: Requester, data: dict[str, Any], version: str) -> None: + super().__init__(requester=requester, data=data) + self._version = version + + @cached_property + def id(self: Self) -> int: + return 1 + + @property + def version(self: Self) -> str: + return self._version + + def get_own_path(self: Self) -> Endpoint: + return ("adcm",) + + +class License(WithProtectedRequester): + def __init__(self: Self, requester: Requester, prototypes_data: dict) -> None: + self._license_prototype_id = prototypes_data["id"] + self._data = prototypes_data["license"] + self._requester = requester + + @property + def text(self: Self) -> str: + return str(self._data["text"]) + + @property + def state(self: Self) -> Literal["absent", "accepted", "unaccepted"]: + return self._data["status"] + + async def accept(self: Self) -> str: + await self._requester.post("prototypes", self._license_prototype_id, "license", "accept", data={}) + self._data["status"] = "accepted" + return 
self._data["status"] + + +class Bundle(Deletable, RootInteractiveObject): + PATH_PREFIX = "bundles" + + @property + def name(self: Self) -> str: + return str(self._data["name"]) + + @property + def display_name(self: Self) -> str: + return str(self._data["display_name"]) + + @property + def version(self: Self) -> str: + return str(self._data["version"]) + + @property + def edition(self: Self) -> Literal["community", "enterprise"]: + return self._data["edition"] + + @property + def signature_status(self: Self) -> Literal["invalid", "valid", "absent"]: + return self._data["signatureStatus"] + + @property + def _type(self: Self) -> Literal["cluster", "provider"]: + return self._data["mainPrototype"]["type"] + + @async_cached_property + async def license(self: Self) -> License: + return License(self._requester, self._data["mainPrototype"]) + + @cached_property + def _main_prototype_id(self: Self) -> int: + return self._data["mainPrototype"]["id"] + + +class BundlesNode(PaginatedAccessor[Bundle]): + class_type = Bundle + filtering = Filtering( + FilterByName, + FilterByDisplayName, + FilterBy("version", ALL_OPERATIONS, str), + FilterBy("edition", ALL_OPERATIONS, str), + ) + + def __init__(self: Self, path: Endpoint, requester: Requester, retriever: BundleRetrieverInterface) -> None: + super().__init__(path, requester) + self._bundle_retriever = retriever + + async def create(self: Self, source: Path | URLStr, *, accept_license: bool = False) -> Bundle: + if isinstance(source, Path): + file = Path(source).read_bytes() + else: + file = await self._bundle_retriever.download_external_bundle(source) + + try: + response = await self._requester.post_files("bundles", files={"file": file}) + except ConflictError as e: + if "Bundle already exists" in str(e): + raise ObjectAlreadyExistsError(*e.args) from None + raise + + bundle = Bundle(requester=self._requester, data=response.as_dict()) + + if accept_license: + license_ = await bundle.license + if license_.state == "unaccepted": + await license_.accept() + + return bundle + + def get_own_path(self: Self) -> Endpoint: + return ("bundles",) + + +class Cluster( + WithStatus, + Deletable, + WithActions, + WithUpgrades, + WithConfig, + WithImports, + WithActionHostGroups, + WithConfigHostGroups, + RootInteractiveObject, +): + PATH_PREFIX = "clusters" + + # data-based properties + + @property + def name(self: Self) -> str: + return str(self._data["name"]) + + @property + def description(self: Self) -> str: + return str(self._data["description"]) + + # related/dynamic data access + + @async_cached_property + async def bundle(self: Self) -> Bundle: + prototype_id = self._data["prototype"]["id"] + response = await self._requester.get("prototypes", prototype_id) + + bundle_id = response.as_dict()["bundle"]["id"] + response = await self._requester.get("bundles", bundle_id) + + return self._construct(what=Bundle, from_data=response.as_dict()) + + # object-specific methods + + async def set_ansible_forks(self: Self, value: int) -> Self: + try: + await self._requester.post( + *self.get_own_path(), "ansible-config", data={"config": {"defaults": {"forks": value}}, "adcmMeta": {}} + ) + except UnknownError as e: + raise ObjectUpdateError(*e.args) from None + return self + + # nodes and managers to access + + @async_cached_property + async def mapping(self: Self) -> ClusterMapping: + return await ClusterMapping.for_cluster(owner=self) + + @cached_property + def services(self: Self) -> "ServicesNode": + return ServicesNode(parent=self, path=(*self.get_own_path(), 
"services"), requester=self._requester) + + @cached_property + def hosts(self: Self) -> "HostsInClusterNode": + return HostsInClusterNode(cluster=self) + + +FilterByBundle = FilterBy("bundle", COMMON_OPERATIONS, Bundle) + + +class ClustersNode(PaginatedAccessor[Cluster]): + class_type = Cluster + filtering = Filtering(FilterByName, FilterByBundle, FilterByStatus) + + async def create(self: Self, bundle: Bundle, name: str, description: str = "") -> Cluster: + try: + response = await self._requester.post( + "clusters", + data={ + "prototypeId": bundle._main_prototype_id, + "name": name, + "description": description, + }, + ) + except ConflictError as e: + if "already exists" in str(e): + raise ObjectAlreadyExistsError(*e.args) from None + raise + + return Cluster(requester=self._requester, data=response.as_dict()) + + +class Service( + WithStatus, + Deletable, + WithActions, + WithConfig, + WithImports, + WithActionHostGroups, + WithConfigHostGroups, + InteractiveChildObject[Cluster], +): + PATH_PREFIX = "services" + + @property + def name(self: Self) -> str: + return self._data["name"] + + @property + def display_name(self: Self) -> str: + return self._data["displayName"] + + @cached_property + def cluster(self: Self) -> Cluster: + return self._parent + + @cached_property + def components(self: Self) -> "ComponentsNode": + return ComponentsNode(parent=self, path=(*self.get_own_path(), "components"), requester=self._requester) + + @async_cached_property + async def license(self: Self) -> License: + prototype_data = (await self.requester.get("prototypes", self._data["prototype"]["id"])).as_dict() + return License(self._requester, prototype_data) + + +class ServicesNode(PaginatedChildAccessor[Cluster, Service]): + class_type = Service + filtering = Filtering(FilterByName, FilterByDisplayName, FilterByStatus) + service_add_filtering = Filtering(FilterByName, FilterByDisplayName) + + async def add(self: Self, filter_: Filter, *, accept_license: bool = False) -> list[Service]: + candidates = await self._retrieve_service_candidates(filter_=filter_) + + if not candidates: + message = "No services to add by given filters" + raise NotFoundError(message) + + if accept_license: + await self._accept_licenses_safe(candidates) + + return await self._add_services(candidates) + + async def _retrieve_service_candidates(self: Self, filter_: Filter) -> list[dict]: + query = self.service_add_filtering.to_query(filters=(filter_,)) + response = await self._requester.get(*self._parent.get_own_path(), "service-candidates", query=query) + return response.as_list() + + async def _accept_licenses_safe(self: Self, candidates: list[dict]) -> None: + unaccepted: deque[int] = deque() + + for candidate in candidates: + if candidate["license"]["status"] == "unaccepted": + unaccepted.append(candidate["id"]) + + if unaccepted: + tasks = ( + self._requester.post("prototypes", prototype_id, "license", "accept", data={}) + for prototype_id in unaccepted + ) + await asyncio.gather(*tasks) + + async def _add_services(self: Self, candidates: list[dict]) -> list[Service]: + data = [{"prototypeId": candidate["id"]} for candidate in candidates] + response = await self._requester.post(*self._parent.get_own_path(), "services", data=data) + return [Service(data=entry, parent=self._parent) for entry in response.as_list()] + + +class Component( + WithStatus, WithActions, WithConfig, WithActionHostGroups, WithConfigHostGroups, InteractiveChildObject[Service] +): + PATH_PREFIX = "components" + + @property + def name(self: Self) -> str: + 
return self._data["name"] + + @property + def display_name(self: Self) -> str: + return self._data["displayName"] + + @async_cached_property + async def constraint(self: Self) -> list[int | str]: + response = (await self._requester.get(*self.cluster.get_own_path(), "mapping", "components")).as_list() + for component in response: + if component["id"] == self.id: + return component["constraints"] + + raise NotFoundError + + @cached_property + def service(self: Self) -> Service: + return self._parent + + @cached_property + def cluster(self: Self) -> Cluster: + return self.service.cluster + + @cached_property + def hosts(self: Self) -> "HostsAccessor": + return HostsAccessor( + path=(*self.cluster.get_own_path(), "hosts"), + requester=self._requester, + default_query={"componentId": self.id}, + ) + + +class ComponentsNode(PaginatedChildAccessor[Service, Component]): + class_type = Component + filtering = Filtering(FilterByName, FilterByDisplayName, FilterByStatus) + + +class HostProvider(Deletable, WithActions, WithUpgrades, WithConfig, WithConfigHostGroups, RootInteractiveObject): + PATH_PREFIX = "hostproviders" + filtering = Filtering(FilterByName, FilterByBundle) + + # data-based properties + + @property + def name(self: Self) -> str: + return str(self._data["name"]) + + @property + def description(self: Self) -> str: + return str(self._data["description"]) + + @property + def display_name(self: Self) -> str: + return str(self._data["prototype"]["displayName"]) + + @cached_property + def hosts(self: Self) -> "HostsAccessor": + return HostsAccessor(path=("hosts",), requester=self._requester, default_query={"hostproviderName": self.name}) + + +class HostProvidersNode(PaginatedAccessor[HostProvider]): + class_type = HostProvider + filtering = Filtering(FilterByName, FilterByBundle) + + async def create(self: Self, bundle: Bundle, name: str, description: str = "") -> HostProvider: + try: + response = await self._requester.post( + "hostproviders", + data={ + "prototypeId": bundle._main_prototype_id, + "name": name, + "description": description, + }, + ) + except ConflictError as e: + if "duplicate host provider" in str(e): + raise ObjectAlreadyExistsError(*e.args) from None + raise + + return HostProvider(requester=self._requester, data=response.as_dict()) + + +class Host(Deletable, WithActions, WithStatus, WithMaintenanceMode, RootInteractiveObject): + PATH_PREFIX = "hosts" + + @property + def name(self: Self) -> str: + return str(self._data["name"]) + + @property + def description(self: Self) -> str: + return str(self._data["description"]) + + @async_cached_property + async def cluster(self: Self) -> Cluster | None: + if not self._data["cluster"]: + return None + + return await Cluster.with_id(requester=self._requester, object_id=self._data["cluster"]["id"]) + + @async_cached_property + async def hostprovider(self: Self) -> HostProvider: + return await HostProvider.with_id(requester=self._requester, object_id=self._data["hostprovider"]["id"]) + + +class HostsAccessor(PaginatedAccessor[Host]): + class_type = Host + filtering = Filtering(FilterByName, FilterByStatus, FilterBy("hostprovider", COMMON_OPERATIONS, HostProvider)) + + +class HostsNode(HostsAccessor): + async def create( + self: Self, hostprovider: HostProvider, name: str, description: str = "", cluster: Cluster | None = None + ) -> None: + data = {"hostproviderId": hostprovider.id, "name": name, "description": description} + if cluster: + data["clusterId"] = cluster.id + try: + await self._requester.post(*self._path, data=data) + 
except ConflictError as e: + if "already exists" in str(e): + raise ObjectAlreadyExistsError(*e.args) from None + raise + + +class HostsInClusterNode(HostsAccessor): + def __init__(self: Self, cluster: Cluster) -> None: + path = (*cluster.get_own_path(), "hosts") + super().__init__(path=path, requester=cluster.requester) + + self._root_host_filter = HostsAccessor(path=("hosts",), requester=cluster.requester).filter + + async def add(self: Self, host: Host | Iterable[Host] | Filter) -> None: + hosts = await self._get_hosts(host=host, filter_func=self._root_host_filter) + + try: + await self._requester.post(*self._path, data=[{"hostId": host.id} for host in hosts]) + except ConflictError as e: + if "already linked to another cluster" in str(e): + raise HostConflictError(*e.args) from None + raise + + async def remove(self: Self, host: Host | Iterable[Host] | Filter) -> None: + hosts = await self._get_hosts(host=host, filter_func=self.filter) + + error = await safe_gather( + coros=(self._requester.delete(*self._path, host_.id) for host_ in hosts), + msg="Some hosts can't be deleted from cluster", + ) + + if error is not None: + raise error + + async def _get_hosts( + self: Self, host: Host | Iterable[Host] | Filter, filter_func: Callable[..., Awaitable[list[Host]]] + ) -> tuple[Host, ...]: + if isinstance(host, Host): + hosts = (host,) + elif isinstance(host, Filter): + inline_filters = filters_to_inline(host) + hosts = await filter_func(**inline_filters) + else: + hosts = host + + return tuple(hosts) + + +async def default_exit_condition(job: "Job") -> bool: + return await job.get_status() in DEFAULT_JOB_TERMINAL_STATUSES + + +class Job(WithStatus, RootInteractiveObject): + PATH_PREFIX = "tasks" + + @property + def name(self: Self) -> str: + return str(self._data["name"]) + + @property + def display_name(self: Self) -> str: + return str(self._data["displayName"]) + + @cached_property + def start_time(self: Self) -> datetime | None: + time = self._data["startTime"] + if time is None: + return time + + return datetime.fromisoformat(time) + + @cached_property + def finish_time(self: Self) -> datetime | None: + time = self._data["endTime"] + if time is None: + return time + + return datetime.fromisoformat(time) + + @async_cached_property + async def object(self: Self) -> InteractiveObject: + objects_raw = self._parse_objects() + return await self._retrieve_target(objects_raw) + + @async_cached_property + async def action(self: Self) -> Action: + target = await self.object + return Action(parent=target, data=self._data["action"]) + + async def wait( + self: Self, + timeout: int | None = None, + poll_interval: int = 10, + exit_condition: Callable[[Self], Awaitable[bool]] = default_exit_condition, + ) -> Self: + timeout_condition = datetime.max if timeout is None else (datetime.now() + timedelta(seconds=timeout)) # noqa: DTZ005 + + while datetime.now() < timeout_condition: # noqa: DTZ005 + if await exit_condition(self): + return self + + await asyncio.sleep(poll_interval) + + message = "Failed to meet exit condition for job" + if timeout: + message = f"{message} in {timeout} seconds with {poll_interval} second interval" + + raise WaitTimeoutError(message) + + async def terminate(self: Self) -> None: + await self._requester.post(*self.get_own_path(), "terminate", data={}) + + def _parse_objects(self: Self) -> dict[str, int]: + return {entry["type"]: entry["id"] for entry in self._data["objects"]} + + async def _retrieve_target(self: Self, objects: dict[str, int]) -> InteractiveObject: + match 
objects: + case {"action_host_group": id_}: + objects.pop("action_host_group") + owner = await self._retrieve_target(objects) + return await ActionHostGroup.with_id(parent=owner, object_id=id_) + + case {"host": id_}: + return await Host.with_id(requester=self._requester, object_id=id_) + + case {"component": id_}: + objects.pop("component") + + owner = await self._retrieve_target(objects) + if not isinstance(owner, Service): + message = f"Incorrect owner for component detected from job data: {owner}" + raise TypeError(message) + + return await Component.with_id(parent=owner, object_id=id_) + + case {"service": id_}: + objects.pop("service") + + owner = await self._retrieve_target(objects) + if not isinstance(owner, Cluster): + message = f"Incorrect owner for service detected from job data: {owner}" + raise TypeError(message) + + return await Service.with_id(parent=owner, object_id=id_) + + case {"cluster": id_}: + return await Cluster.with_id(requester=self._requester, object_id=id_) + + case {"provider": id_}: + return await HostProvider.with_id(requester=self._requester, object_id=id_) + case _: + message = f"Failed to detect Job's owner based on {objects}" + raise RuntimeError(message) + + +class JobsNode(PaginatedAccessor[Job]): + class_type = Job + filtering = Filtering( + FilterByName, + FilterByDisplayName, + FilterByStatus, + FilterBy("action", COMMON_OPERATIONS, Action), + # technical filters, don't use them directly + FilterBy("target_id", ("eq",), int), + FilterBy("target_type", ("eq",), str), + ) + + # override accessor methods to allow passing object + + async def get(self: Self, *, object: InteractiveObject | None = None, **filters: FilterValue) -> Job: # noqa: A002 + object_filter = self._prepare_filter_by_object(object) + all_filters = filters | object_filter + return await super().get(**all_filters) + + async def get_or_none(self: Self, *, object: InteractiveObject | None = None, **filters: FilterValue) -> Job | None: # noqa: A002 + object_filter = self._prepare_filter_by_object(object) + all_filters = filters | object_filter + return await super().get_or_none(**all_filters) + + async def filter(self: Self, *, object: InteractiveObject | None = None, **filters: FilterValue) -> list[Job]: # noqa: A002 + object_filter = self._prepare_filter_by_object(object) + all_filters = filters | object_filter + return await super().filter(**all_filters) + + async def iter( + self: Self, + *, + object: InteractiveObject | None = None, # noqa: A002 + **filters: FilterValue, + ) -> AsyncGenerator[Job, None]: + object_filter = self._prepare_filter_by_object(object) + all_filters = filters | object_filter + async for entry in super().iter(**all_filters): + yield entry + + def _prepare_filter_by_object(self: Self, object_: InteractiveObject | None) -> dict: + if object_ is None: + return {} + + object_id = object_.id + + if isinstance(object_, (Cluster, Service, Component, Host)): + object_type = object_.__class__.__name__.lower() + elif isinstance(object_, HostProvider): + object_type = "provider" + elif isinstance(object_, ActionHostGroup): + object_type = "action_host_group" + else: + message = f"Failed to build filter: {object_.__class__.__name__} " "can't be an owner of Job" + raise InvalidFilterError(message) + + return {"target_id__eq": object_id, "target_type__eq": object_type} diff --git a/adcm_aio_client/core/requesters.py b/adcm_aio_client/core/requesters.py new file mode 100644 index 00000000..4c59a146 --- /dev/null +++ b/adcm_aio_client/core/requesters.py @@ -0,0 +1,242 @@ +# 
diff --git a/adcm_aio_client/core/requesters.py b/adcm_aio_client/core/requesters.py
new file mode 100644
index 00000000..4c59a146
--- /dev/null
+++ b/adcm_aio_client/core/requesters.py
@@ -0,0 +1,242 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from abc import ABC, abstractmethod
+from asyncio import sleep
+from contextlib import suppress
+from dataclasses import dataclass
+from functools import wraps
+from json.decoder import JSONDecodeError
+from typing import Any, Awaitable, Callable, Coroutine, ParamSpec, Self, TypeAlias
+from urllib.parse import urljoin
+
+import httpx
+
+from adcm_aio_client.core.errors import (
+    AuthenticationError,
+    BadRequestError,
+    ConflictError,
+    LoginError,
+    LogoutError,
+    NoCredentialsError,
+    NotFoundError,
+    OperationError,
+    PermissionDeniedError,
+    ResponseDataConversionError,
+    RetryRequestError,
+    ServerError,
+    UnauthorizedError,
+    UnknownError,
+)
+from adcm_aio_client.core.types import Credentials, PathPart, QueryParameters, Requester, RetryPolicy, URLStr
+
+Json: TypeAlias = Any
+Params = ParamSpec("Params")
+RequestFunc: TypeAlias = Callable[Params, Awaitable["HTTPXRequesterResponse"]]
+DoRequestFunc: TypeAlias = Callable[Params, Awaitable[httpx.Response]]
+
+
+@dataclass(slots=True)
+class HTTPXRequesterResponse:
+    response: httpx.Response
+    _json_data: Json | None = None
+
+    def as_list(self: Self) -> list:
+        if not isinstance(data := self._get_json_data(), list):
+            message = f"Expected a list, got {type(data)}"
+            raise ResponseDataConversionError(message)
+
+        return data
+
+    def as_dict(self: Self) -> dict:
+        if not isinstance(data := self._get_json_data(), dict):
+            message = f"Expected a dict, got {type(data)}"
+            raise ResponseDataConversionError(message)
+
+        return data
+
+    def _get_json_data(self: Self) -> Json:
+        if self._json_data is not None:
+            return self._json_data
+
+        try:
+            data = self.response.json()
+        except JSONDecodeError as e:
+            message = "Response can't be parsed to json"
+            raise ResponseDataConversionError(message) from e
+
+        self._json_data = data
+
+        return self._json_data
+
+
+STATUS_ERRORS_MAP = {
+    400: BadRequestError,
+    401: UnauthorizedError,
+    403: PermissionDeniedError,
+    404: NotFoundError,
+    409: ConflictError,
+    500: ServerError,
+}
+
+
+def convert_exceptions(func: DoRequestFunc) -> DoRequestFunc:
+    @wraps(func)
+    async def wrapper(*args: Params.args, **kwargs: Params.kwargs) -> httpx.Response:
+        response = await func(*args, **kwargs)
+        if response.status_code >= 300:
+            error_cls = STATUS_ERRORS_MAP.get(response.status_code, UnknownError)
+            # the body may not be JSON, so fall back to the raw content
+            try:
+                message = response.json()
+            except JSONDecodeError:
+                message = f"Request failed with {response.status_code} response code: {response.content.decode('utf-8')}"
+            raise error_cls(message)
+
+        return response
+
+    return wrapper
+
+
+def retry_request(request_func: RequestFunc) -> RequestFunc:
+    @wraps(request_func)
+    async def wrapper(self: "DefaultRequester", *args: Params.args, **kwargs: Params.kwargs) -> HTTPXRequesterResponse:
+        retries = self._retries
+        last_error = None
+
+        for attempt in range(retries.attempts):
+            try:
+                response = await request_func(self, *args, **kwargs)
+            except (UnauthorizedError, httpx.NetworkError, httpx.TransportError) as e:
+                last_error = e
+                if attempt >= retries.attempts - 1:
+                    # last attempt failed: skip the sleep/re-login and let the loop finish
+                    continue
+
+                await sleep(retries.interval)
+
+                with suppress(httpx.NetworkError, httpx.TransportError):
+                    await self.login(self._ensure_credentials())
+            else:
+                break
+        else:
+            message = f"Request failed in {retries.attempts} attempts"
+            if last_error is None:
+                raise RetryRequestError(message)
+
+            message = f"{message}. Last error: {last_error}"
+            raise RetryRequestError(message) from last_error
+
+        return response
+
+    return wrapper
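+
+
+# Behaviour sketch for the decorator above: with RetryPolicy(attempts=3, interval=1)
+# a request is tried up to three times; after an UnauthorizedError or a
+# transport-level failure the wrapper sleeps one second, re-logins on a
+# best-effort basis and retries, and raises RetryRequestError chained to the
+# last error once all attempts are exhausted.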
+
+
+class DefaultRequester(Requester):
+    __slots__ = ("_credentials", "_client", "_retries", "_prefix")
+
+    def __init__(self: Self, http_client: httpx.AsyncClient, retries: RetryPolicy) -> None:
+        self._retries = retries
+        self._client = http_client
+        self._prefix = "/api/v2/"
+        self._credentials = None
+
+    @property
+    def client(self: Self) -> httpx.AsyncClient:
+        return self._client
+
+    async def login(self: Self, credentials: Credentials) -> Self:
+        login_url = self._make_url("login")
+
+        try:
+            response = await self._do_request(self.client.post(url=login_url, data=credentials.dict()))
+        except UnauthorizedError as e:
+            message = (
+                f"Login to ADCM at {self.client.base_url} has failed for "
+                f"user {credentials.username} most likely due to incorrect credentials"
+            )
+            raise AuthenticationError(message) from e
+        except UnknownError as e:
+            message = f"Login to ADCM at {self.client.base_url} has failed for user {credentials.username}: {e}"
+            raise LoginError(message) from e
+
+        self._credentials = credentials
+        self.client.headers["X-CSRFToken"] = response.cookies["csrftoken"]
+        self.client.headers["Referer"] = str(self.client.base_url)
+
+        return self
+
+    async def logout(self: Self) -> Self:
+        logout_url = self._make_url("logout")
+
+        try:
+            request_coro = self.client.post(url=logout_url, data={})
+            await self._do_request(request_coro)
+        except UnknownError as e:
+            message = f"Logout from ADCM at {self.client.base_url} has failed"
+            raise LogoutError(message) from e
+
+        self.client.headers.pop("X-CSRFToken", None)
+
+        return self
+
+    async def get(self: Self, *path: PathPart, query: QueryParameters | None = None) -> HTTPXRequesterResponse:
+        return await self.request(*path, method=self.client.get, params=query or {})
+
+    async def post_files(self: Self, *path: PathPart, files: dict) -> HTTPXRequesterResponse:
+        return await self.request(*path, method=self.client.post, files=files)
+
+    async def post(self: Self, *path: PathPart, data: dict | list) -> HTTPXRequesterResponse:
+        return await self.request(*path, method=self.client.post, json=data)
+
+    async def patch(self: Self, *path: PathPart, data: dict | list) -> HTTPXRequesterResponse:
+        return await self.request(*path, method=self.client.patch, json=data)
+
+    async def delete(self: Self, *path: PathPart) -> HTTPXRequesterResponse:
+        return await self.request(*path, method=self.client.delete)
+
+    @retry_request
+    async def request(self: Self, *path: PathPart, method: Callable, **kwargs: dict) -> HTTPXRequesterResponse:
+        url = self._make_url(*path)
+        response = await self._do_request(method(url, **kwargs))
+
+        return HTTPXRequesterResponse(response=response)
+
+    def _make_url(self: Self, *path: PathPart) -> str:
+        # the trailing empty part ensures the resulting URL ends with a slash
+        return urljoin(self._prefix, "/".join(map(str, (*path, ""))))
+
+    @convert_exceptions
+    async def _do_request(self: Self, request_coro: Coroutine[Any, Any, httpx.Response]) -> httpx.Response:
+        return await request_coro
+
+    def _ensure_credentials(self: Self) -> Credentials:
+        if self._credentials is None:
+            raise NoCredentialsError
+
+        return self._credentials
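+
+
+# Illustrative usage sketch (the "clusters" endpoint and query keys are
+# assumptions for the example, not guaranteed API):
+#
+#     requester = DefaultRequester(http_client=client, retries=RetryPolicy(attempts=3, interval=1))
+#     await requester.login(Credentials(username="admin", password="<password>"))
+#     clusters = (await requester.get("clusters", query={"offset": 0, "limit": 10})).as_dict()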
+
+
+class BundleRetrieverInterface(ABC):
+    @abstractmethod
+    async def download_external_bundle(self: Self, url: URLStr) -> bytes:
+        pass
+
+
+class BundleRetriever(BundleRetrieverInterface):
+    async def download_external_bundle(self: Self, url: URLStr) -> bytes:
+        try:
+            async with httpx.AsyncClient() as client:
+                response = await client.get(url)
+                response.raise_for_status()
+                return response.content
+        except ValueError as err:
+            raise OperationError(f"Failed to download the bundle {url}") from err
+        except httpx.HTTPStatusError as err:
+            raise OperationError(f"HTTP error occurred: {err}") from err
diff --git a/adcm_aio_client/core/types.py b/adcm_aio_client/core/types.py
new file mode 100644
index 00000000..ea7100d4
--- /dev/null
+++ b/adcm_aio_client/core/types.py
@@ -0,0 +1,128 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import asdict, dataclass
+from enum import Enum
+from typing import Optional, Protocol, Self
+
+# Init / Authorization
+
+type AuthToken = str
+type Cert = str | tuple[str, Optional[str], Optional[str]]
+type Verify = str | bool
+
+
+@dataclass(slots=True, frozen=True)
+class Credentials:
+    username: str
+    password: str
+
+    def dict(self: Self) -> dict:
+        return asdict(self)
+
+    def __repr__(self: Self) -> str:
+        return f"{self.username}'s credentials"
+
+
+# Requests
+
+type PathPart = str | int
+type Endpoint = tuple[PathPart, ...]
+
+type QueryParameters = dict
+
+
+class RequesterResponse(Protocol):
+    def as_list(self: Self) -> list: ...
+
+    def as_dict(self: Self) -> dict: ...
+
+
+class Requester(Protocol):
+    async def get(self: Self, *path: PathPart, query: QueryParameters | None = None) -> RequesterResponse: ...
+
+    async def post_files(self: Self, *path: PathPart, files: dict) -> RequesterResponse: ...
+
+    async def post(self: Self, *path: PathPart, data: dict | list) -> RequesterResponse: ...
+
+    async def patch(self: Self, *path: PathPart, data: dict | list) -> RequesterResponse: ...
+
+    async def delete(self: Self, *path: PathPart) -> RequesterResponse: ...
+
+
+# Session
+
+
+@dataclass(slots=True)
+class ConnectionSecurity:
+    verify: str | bool
+    certificate: Cert | None
+
+
+@dataclass(slots=True)
+class SessionInfo:
+    url: str
+    credentials: Credentials
+    security: ConnectionSecurity
+
+
+@dataclass(slots=True)
+class RetryPolicy:
+    attempts: int
+    interval: int
+
+
+@dataclass(slots=True)
+class RequestPolicy:
+    timeout: int
+    retry: RetryPolicy
+
+
+# Objects
+
+type ComponentID = int
+type HostID = int
+
+
+class WithID(Protocol):
+    id: int
+
+
+class WithProtectedRequester(Protocol):
+    _requester: Requester
+
+
+class WithRequesterProperty(Protocol):
+    # linter check is ignored: annotating `self: Self` here breaks type checking, so this is a quick fix
+    @property
+    def requester(self) -> Requester: ...  # noqa: ANN101
+
+
+class AwareOfOwnPath(Protocol):
+    def get_own_path(self: Self) -> Endpoint: ...
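+
+
+# The Protocols above support structural typing: implementations don't have to
+# inherit from them (DefaultRequester in requesters.py subclasses Requester
+# explicitly, but any object with matching method signatures would also be
+# accepted wherever a Requester is expected).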
+
+
+class MappingOperation(str, Enum):
+    ADD = "add"
+    REMOVE = "remove"
+
+
+type URLStr = str
+
+
+DEFAULT_JOB_TERMINAL_STATUSES = frozenset(("broken", "aborted", "failed", "success"))
+
+
+class MaintenanceModeStatus(str, Enum):
+    ON = "on"
+    OFF = "off"
+    CHANGING = "changing"
diff --git a/adcm_aio_client/core/utils.py b/adcm_aio_client/core/utils.py
new file mode 100644
index 00000000..86179cdb
--- /dev/null
+++ b/adcm_aio_client/core/utils.py
@@ -0,0 +1,17 @@
+from typing import Awaitable, Iterable
+import asyncio
+
+from adcm_aio_client.core.types import RequesterResponse
+
+
+async def safe_gather(coros: Iterable[Awaitable[RequesterResponse]], msg: str) -> ExceptionGroup | None:  # noqa: F821
+    """
+    Run the coroutines via asyncio.gather() and return their errors combined into an ExceptionGroup, or None
+    """
+    results = await asyncio.gather(*coros, return_exceptions=True)
+    exceptions = [res for res in results if isinstance(res, Exception)]
+
+    if exceptions:
+        return ExceptionGroup(msg, exceptions)  # noqa: F821  # TODO: tool.ruff.target-version = "py312" & run linters
+
+    return None
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 00000000..82be4859
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,718 @@
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
+
+[[package]]
+name = "adcm-version"
+version = "1.0.3"
+description = ""
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+    {file = "adcm_version-1.0.3-py3-none-any.whl", hash = "sha256:e59bc0e6ed23ee0bc870a5a6b5c1a6ea7c671914e43c2cee6a5fb0d1e3c314ab"},
+    {file = "adcm_version-1.0.3.tar.gz", hash = "sha256:2052d7c17ef72f1e32971e939ec972426978025e5c052320062f4ed3a2c90bc5"},
+]
+
+[package.dependencies]
+version-utils = ">=0.3.2,<0.4.0"
+
+[[package]]
+name = "anyio"
+version = "4.6.2.post1"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
+    {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
+trio = ["trio (>=0.26.1)"]
+
+[[package]]
+name = "asyncstdlib"
+version = "3.13.0"
+description = "The missing async toolbox"
+optional = false
+python-versions = "~=3.8"
+files = [
+    {file = "asyncstdlib-3.13.0-py3-none-any.whl", hash = "sha256:60e097c19e815f3c419a77426cf6c3653aebcb766544d631d5ce6128d0851ae8"},
+    {file = "asyncstdlib-3.13.0.tar.gz", hash = "sha256:f2a6ffb44f118233bb99bef50861d6f64c432decbdcc4c2cb93b3fff40d1b533"},
+]
+
+[package.extras]
+doc = ["sphinx", "sphinxcontrib-trio"]
+test = ["black", "coverage", "flake8", "flake8-2020", "flake8-bugbear", "mypy", "pytest", "pytest-cov"]
+typetest = ["mypy", "pyright", "typing-extensions"]
+
+[[package]]
+name = "certifi"
+version = "2024.8.30"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = 
"charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", 
hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = 
"sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "docker" +version = "7.1.0" +description = "A Python library for the Docker Engine API." +optional = false +python-versions = ">=3.8" +files = [ + {file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"}, + {file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"}, +] + +[package.dependencies] +pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} +requests = ">=2.26.0" +urllib3 = ">=1.26.0" + +[package.extras] +dev = ["coverage (==7.2.7)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.1.0)", "ruff (==0.1.8)"] +docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"] +ssh = ["paramiko (>=2.4.3)"] +websockets = ["websocket-client (>=1.3.0)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.6" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pyright" +version = "1.1.388" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.388-py3-none-any.whl", hash = "sha256:c7068e9f2c23539c6ac35fc9efac6c6c1b9aa5a0ce97a9a8a6cf0090d7cbf84c"}, + {file = "pyright-1.1.388.tar.gz", hash = "sha256:0166d19b716b77fd2d9055de29f71d844874dbc6b9d3472ccd22df91db3dfa34"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" + +[package.extras] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] + 
+[[package]] +name = "pytest" +version = "8.3.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.24.0" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"}, + {file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-timeout" +version = "2.3.1" +description = "pytest plugin to abort hanging tests" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9"}, + {file = "pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[[package]] +name = "pywin32" +version = "308" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = 
"pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", 
hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "ruff" +version = "0.7.3" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"}, + {file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"}, + {file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"}, + {file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"}, + {file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"}, + {file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"}, + {file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "testcontainers" +version = "4.8.2" +description = "Python library for throwaway instances of anything that can run in a Docker container" +optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "testcontainers-4.8.2-py3-none-any.whl", hash = "sha256:9e19af077cd96e1957c13ee466f1f32905bc6c5bc1bc98643eb18be1a989bfb0"}, + {file = "testcontainers-4.8.2.tar.gz", hash = 
"sha256:dd4a6a2ea09e3c3ecd39e180b6548105929d0bb78d665ce9919cb3f8c98f9853"}, +] + +[package.dependencies] +docker = "*" +typing-extensions = "*" +urllib3 = "*" +wrapt = "*" + +[package.extras] +arangodb = ["python-arango (>=7.8,<8.0)"] +aws = ["boto3", "httpx"] +azurite = ["azure-storage-blob (>=12.19,<13.0)"] +chroma = ["chromadb-client"] +clickhouse = ["clickhouse-driver"] +cosmosdb = ["azure-cosmos"] +db2 = ["ibm_db_sa", "sqlalchemy"] +generic = ["httpx", "redis"] +google = ["google-cloud-datastore (>=2)", "google-cloud-pubsub (>=2)"] +influxdb = ["influxdb", "influxdb-client"] +k3s = ["kubernetes", "pyyaml"] +keycloak = ["python-keycloak"] +localstack = ["boto3"] +mailpit = ["cryptography"] +minio = ["minio"] +mongodb = ["pymongo"] +mssql = ["pymssql", "sqlalchemy"] +mysql = ["pymysql[rsa]", "sqlalchemy"] +nats = ["nats-py"] +neo4j = ["neo4j"] +opensearch = ["opensearch-py"] +oracle = ["oracledb", "sqlalchemy"] +oracle-free = ["oracledb", "sqlalchemy"] +qdrant = ["qdrant-client"] +rabbitmq = ["pika"] +redis = ["redis"] +registry = ["bcrypt"] +scylla = ["cassandra-driver (==3.29.1)"] +selenium = ["selenium"] +sftp = ["cryptography"] +test-module-import = ["httpx"] +trino = ["trino"] +weaviate = ["weaviate-client (>=4.5.4,<5.0.0)"] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "version-utils" +version = "0.3.2" +description = "Library for parsing system package strings and comparing package versions" +optional = false +python-versions = "*" +files = [ + {file = "version_utils-0.3.2-py2.py3-none-any.whl", hash = "sha256:4e0f3dff669d7a081dd66d8b616752dc309e7246a7b2c5ac800dde5ec0d9a555"}, + {file = "version_utils-0.3.2.tar.gz", hash = "sha256:308191f111395ac19ec5ef4650764af29962a6415d8391785027ae5328579299"}, +] + +[[package]] +name = "wrapt" +version = "1.17.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "wrapt-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a0c23b8319848426f305f9cb0c98a6e32ee68a36264f45948ccf8e7d2b941f8"}, + {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ca5f060e205f72bec57faae5bd817a1560fcfc4af03f414b08fa29106b7e2d"}, + {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e185ec6060e301a7e5f8461c86fb3640a7beb1a0f0208ffde7a65ec4074931df"}, + {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb90765dd91aed05b53cd7a87bd7f5c188fcd95960914bae0d32c5e7f899719d"}, + {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:879591c2b5ab0a7184258274c42a126b74a2c3d5a329df16d69f9cee07bba6ea"}, + {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fce6fee67c318fdfb7f285c29a82d84782ae2579c0e1b385b7f36c6e8074fffb"}, + {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0698d3a86f68abc894d537887b9bbf84d29bcfbc759e23f4644be27acf6da301"}, + {file = "wrapt-1.17.0-cp310-cp310-win32.whl", hash = "sha256:69d093792dc34a9c4c8a70e4973a3361c7a7578e9cd86961b2bbf38ca71e4e22"}, + {file = "wrapt-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:f28b29dc158ca5d6ac396c8e0a2ef45c4e97bb7e65522bfc04c989e6fe814575"}, + {file = "wrapt-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:74bf625b1b4caaa7bad51d9003f8b07a468a704e0644a700e936c357c17dd45a"}, + {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f2a28eb35cf99d5f5bd12f5dd44a0f41d206db226535b37b0c60e9da162c3ed"}, + {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81b1289e99cf4bad07c23393ab447e5e96db0ab50974a280f7954b071d41b489"}, + {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2939cd4a2a52ca32bc0b359015718472d7f6de870760342e7ba295be9ebaf9"}, + {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a9653131bda68a1f029c52157fd81e11f07d485df55410401f745007bd6d339"}, + {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4e4b4385363de9052dac1a67bfb535c376f3d19c238b5f36bddc95efae15e12d"}, + {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bdf62d25234290db1837875d4dceb2151e4ea7f9fff2ed41c0fde23ed542eb5b"}, + {file = "wrapt-1.17.0-cp311-cp311-win32.whl", hash = "sha256:5d8fd17635b262448ab8f99230fe4dac991af1dabdbb92f7a70a6afac8a7e346"}, + {file = "wrapt-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:92a3d214d5e53cb1db8b015f30d544bc9d3f7179a05feb8f16df713cecc2620a"}, + {file = "wrapt-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:89fc28495896097622c3fc238915c79365dd0ede02f9a82ce436b13bd0ab7569"}, + {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875d240fdbdbe9e11f9831901fb8719da0bd4e6131f83aa9f69b96d18fae7504"}, + {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ed16d95fd142e9c72b6c10b06514ad30e846a0d0917ab406186541fe68b451"}, + {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:18b956061b8db634120b58f668592a772e87e2e78bc1f6a906cfcaa0cc7991c1"}, + {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:daba396199399ccabafbfc509037ac635a6bc18510ad1add8fd16d4739cdd106"}, + {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4d63f4d446e10ad19ed01188d6c1e1bb134cde8c18b0aa2acfd973d41fcc5ada"}, + {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8a5e7cc39a45fc430af1aefc4d77ee6bad72c5bcdb1322cfde852c15192b8bd4"}, + {file = "wrapt-1.17.0-cp312-cp312-win32.whl", hash = "sha256:0a0a1a1ec28b641f2a3a2c35cbe86c00051c04fffcfcc577ffcdd707df3f8635"}, + {file = "wrapt-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:3c34f6896a01b84bab196f7119770fd8466c8ae3dfa73c59c0bb281e7b588ce7"}, + {file = "wrapt-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:714c12485aa52efbc0fc0ade1e9ab3a70343db82627f90f2ecbc898fdf0bb181"}, + {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da427d311782324a376cacb47c1a4adc43f99fd9d996ffc1b3e8529c4074d393"}, + {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba1739fb38441a27a676f4de4123d3e858e494fac05868b7a281c0a383c098f4"}, + {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e711fc1acc7468463bc084d1b68561e40d1eaa135d8c509a65dd534403d83d7b"}, + {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:140ea00c87fafc42739bd74a94a5a9003f8e72c27c47cd4f61d8e05e6dec8721"}, + {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73a96fd11d2b2e77d623a7f26e004cc31f131a365add1ce1ce9a19e55a1eef90"}, + {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0b48554952f0f387984da81ccfa73b62e52817a4386d070c75e4db7d43a28c4a"}, + {file = "wrapt-1.17.0-cp313-cp313-win32.whl", hash = "sha256:498fec8da10e3e62edd1e7368f4b24aa362ac0ad931e678332d1b209aec93045"}, + {file = "wrapt-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd136bb85f4568fffca995bd3c8d52080b1e5b225dbf1c2b17b66b4c5fa02838"}, + {file = "wrapt-1.17.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:17fcf043d0b4724858f25b8826c36e08f9fb2e475410bece0ec44a22d533da9b"}, + {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4a557d97f12813dc5e18dad9fa765ae44ddd56a672bb5de4825527c847d6379"}, + {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0229b247b0fc7dee0d36176cbb79dbaf2a9eb7ecc50ec3121f40ef443155fb1d"}, + {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8425cfce27b8b20c9b89d77fb50e368d8306a90bf2b6eef2cdf5cd5083adf83f"}, + {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9c900108df470060174108012de06d45f514aa4ec21a191e7ab42988ff42a86c"}, + {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4e547b447073fc0dbfcbff15154c1be8823d10dab4ad401bdb1575e3fdedff1b"}, + {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:914f66f3b6fc7b915d46c1cc424bc2441841083de01b90f9e81109c9759e43ab"}, + {file = "wrapt-1.17.0-cp313-cp313t-win32.whl", hash = "sha256:a4192b45dff127c7d69b3bdfb4d3e47b64179a0b9900b6351859f3001397dabf"}, + {file = "wrapt-1.17.0-cp313-cp313t-win_amd64.whl", hash 
= "sha256:4f643df3d4419ea3f856c5c3f40fec1d65ea2e89ec812c83f7767c8730f9827a"}, + {file = "wrapt-1.17.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:69c40d4655e078ede067a7095544bcec5a963566e17503e75a3a3e0fe2803b13"}, + {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f495b6754358979379f84534f8dd7a43ff8cff2558dcdea4a148a6e713a758f"}, + {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:baa7ef4e0886a6f482e00d1d5bcd37c201b383f1d314643dfb0367169f94f04c"}, + {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fc931382e56627ec4acb01e09ce66e5c03c384ca52606111cee50d931a342d"}, + {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8f8909cdb9f1b237786c09a810e24ee5e15ef17019f7cecb207ce205b9b5fcce"}, + {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ad47b095f0bdc5585bced35bd088cbfe4177236c7df9984b3cc46b391cc60627"}, + {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:948a9bd0fb2c5120457b07e59c8d7210cbc8703243225dbd78f4dfc13c8d2d1f"}, + {file = "wrapt-1.17.0-cp38-cp38-win32.whl", hash = "sha256:5ae271862b2142f4bc687bdbfcc942e2473a89999a54231aa1c2c676e28f29ea"}, + {file = "wrapt-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:f335579a1b485c834849e9075191c9898e0731af45705c2ebf70e0cd5d58beed"}, + {file = "wrapt-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d751300b94e35b6016d4b1e7d0e7bbc3b5e1751e2405ef908316c2a9024008a1"}, + {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7264cbb4a18dc4acfd73b63e4bcfec9c9802614572025bdd44d0721983fc1d9c"}, + {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33539c6f5b96cf0b1105a0ff4cf5db9332e773bb521cc804a90e58dc49b10578"}, + {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c30970bdee1cad6a8da2044febd824ef6dc4cc0b19e39af3085c763fdec7de33"}, + {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bc7f729a72b16ee21795a943f85c6244971724819819a41ddbaeb691b2dd85ad"}, + {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6ff02a91c4fc9b6a94e1c9c20f62ea06a7e375f42fe57587f004d1078ac86ca9"}, + {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dfb7cff84e72e7bf975b06b4989477873dcf160b2fd89959c629535df53d4e0"}, + {file = "wrapt-1.17.0-cp39-cp39-win32.whl", hash = "sha256:2399408ac33ffd5b200480ee858baa58d77dd30e0dd0cab6a8a9547135f30a88"}, + {file = "wrapt-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f763a29ee6a20c529496a20a7bcb16a73de27f5da6a843249c7047daf135977"}, + {file = "wrapt-1.17.0-py3-none-any.whl", hash = "sha256:d2c63b93548eda58abf5188e505ffed0229bf675f7c3090f8e36ad55b8cbc371"}, + {file = "wrapt-1.17.0.tar.gz", hash = "sha256:16187aa2317c731170a88ef35e8937ae0f533c402872c1ee5e6d079fcf320801"}, +] + +[metadata] +lock-version = "2.0" +python-versions = "^3.12" +content-hash = "f7bae9e0c1c116fe81eca25b419ce706a5b74821d1910f35632270f51a75727e" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..294c2ee0 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,87 @@ +[tool.poetry] +name = "adcm-aio-client" +version = "0.1.0" +description = "ADCM Client" +authors = ["Aleksandr Alferov "] +license = 
"Apache License Version 2.0" +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.12" +httpx = "^0.27.2" +asyncstdlib = "^3.13.0" +adcm-version = "^1.0.3" + +[tool.poetry.group.dev] +optional = true + +[tool.poetry.group.dev.dependencies] +ruff = "^0.7.1" +pyright = "^1.1.387" + +[tool.poetry.group.test] +optional = true + +[tool.poetry.group.test.dependencies] +pytest = "^8.3.3" +pytest-asyncio = "^0.24.0" +testcontainers = "^4.8.2" +pyyaml = "^6.0.2" +pytest-timeout = "^2.3.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" +timeout = 300 + +[tool.ruff] +line-length = 120 + +[tool.ruff.lint.isort] +force-sort-within-sections = true +length-sort-straight = true +order-by-type = true + +[tool.ruff.lint] +select = [ + # Pyflakes + "F", + # Pycodestyle + "E", "W", + # isort + "I", + # "COM" - The following rules may cause conflicts when used with the formatter: `COM812`. + # To avoid unexpected behavior, we recommend disabling these rules + "N", "UP", "YTT", "ANN", + "S", "BLE", "FBT", "B", "COM", "A", "C4", + "DTZ", "ICN", "PIE", "Q", "RET", + "SIM", "ARG", "PTH", "PLE", "TRY" +] + +ignore = [ + "COM812", + "S101", + "TRY003", +] + +[tool.pyright] +include = [ + "adcm_aio_client", "tests" +] + +executionEnvironments = [ + { root = "." }, + { root = "tests", extraPaths = [ "." ] }, +] + +typeCheckingMode = "standard" +reportUnnecessaryTypeIgnoreComment = true + +reportMissingImports = "error" +reportMissingTypeStubs = false + +pythonVersion = "3.12" +pythonPlatform = "Linux" diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration/bundle.py b/tests/integration/bundle.py new file mode 100644 index 00000000..3c52456f --- /dev/null +++ b/tests/integration/bundle.py @@ -0,0 +1,67 @@ +from pathlib import Path +from tarfile import TarFile +from typing import Any +import shutil + +import yaml + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.objects.cm import Bundle + + +def pack_bundle(from_dir: Path, to: Path) -> Path: + archive = (to / from_dir.name).with_suffix(".tgz") + + with TarFile(name=archive, mode="w") as tar: + for entry in from_dir.iterdir(): + tar.add(entry) + + return archive + + +def modify_yaml_field( + yaml_content: list[dict[str, Any]], target_name: str, field_to_modify: str, new_value: str | int +) -> dict[str, Any]: + for entry in yaml_content: + # Check if the entry matches the specified 'name' + if entry.get(target_name) == field_to_modify: + entry[target_name] = new_value + return entry + raise ValueError(f"Field '{field_to_modify}' not found in config.yaml") + + +async def create_bundles_by_template( + adcm_client: ADCMClient, + tmp_path: Path, + path_to_template_bundle: Path, + target_name: str, + field_to_modify: str, + new_value: str, + number_of_bundles: int, +) -> list[Bundle]: + created_bundles = [] + for i in range(number_of_bundles): + # Create a new path for the temporary bundle + new_bundle_path = tmp_path / f"{path_to_template_bundle.name}_{i}" + + # Copy the whole directory of the template bundle to the new path + shutil.copytree(path_to_template_bundle, new_bundle_path) + + # Update the yaml field in the new config + new_config_path = new_bundle_path / "config.yaml" + with Path.open(new_config_path) as file: + 
new_config_data = yaml.safe_load(file) + + modify_yaml_field( + new_config_data, target_name=target_name, field_to_modify=field_to_modify, new_value=f"{new_value}_{i}" + ) + + with Path.open(new_config_path, "w") as file: + yaml.dump(new_config_data, file) + + (tmp_path / f"{new_bundle_path.name}_packed").mkdir() + bundle_path = pack_bundle(from_dir=new_bundle_path, to=(tmp_path / f"{new_bundle_path.name}_packed")) + created_bundle = await adcm_client.bundles.create(source=bundle_path, accept_license=False) + created_bundles.append(created_bundle) + + return created_bundles diff --git a/tests/integration/bundles/cluster_requires_component/config.yaml b/tests/integration/bundles/cluster_requires_component/config.yaml new file mode 100644 index 00000000..97c8aa8a --- /dev/null +++ b/tests/integration/bundles/cluster_requires_component/config.yaml @@ -0,0 +1,40 @@ +--- +- type: cluster + name: cluster_requires_component + version: '1.0' + edition: community + +- type: service + name: hbase + display_name: HBase + version: "2.2.7" + components: + master: + display_name: "HBase Master Server" + requires: + - service: zookeeper + component: SERVER + - service: hdfs + component: namenode + constraint: [ 1,+ ] + +- type: service + name: zookeeper + display_name: Zookeeper + version: 3.5.10 + components: + SERVER: + display_name: Zookeeper Server + constraint: [ odd ] + +- type: service + name: hdfs + display_name: HDFS + version: "3.1.2" + components: + namenode: + display_name: "HDFS NameNode" + requires: + - service: zookeeper + component: SERVER + constraint: [ 2,+ ] diff --git a/tests/integration/bundles/cluster_with_license/EULA.txt b/tests/integration/bundles/cluster_with_license/EULA.txt new file mode 100644 index 00000000..0ecb87a1 --- /dev/null +++ b/tests/integration/bundles/cluster_with_license/EULA.txt @@ -0,0 +1,68 @@ + +LICENSE AGREEMENT + + +1. DEFINITIONS. THE TERMS AND DEFINITIONS USED IN THE AGREEMENT ARE DESCRIBED IN SECTION 1. + 1.1. "Confidential Information" shall mean all disclosed information with regard to this Agreement or the Product whether furnished in oral, written or other tangible or intangible form. The Confidential Information shall include, but not limited to, components of business plans, products, inventions, design plans, financial plans, customer related information, strategies and other information of similar nature. + 1.2. "Documentation" shall mean all user manuals and administrator guides as well as other technical documents. + 1.3. "Product" shall mean any version of software for computers and data bases, including, but not limited to, the computer software Arenadata Hadoop Platform, that are built on an open SW basis managed by the Apache Software Foundation licensed under the terms and conditions of the Apache 2.0 License (http://www.apache.org/licenses/LICENSE-2.0). + 1.4. "Intellectual Property" shall mean all systems, methods, algorithms, structures, libraries, applications (supplementary software), components/parts of the Product (including texts and fonts), all modules, other elements of the Product copied and/or incorporated in all working software, any copies, documentation, authorship, logos and other information included in the Product. + 1.5. Right Owner - Arenadata Software LLC. + +2. INTELLECTUAL PROPERTY EXCLUSIVE RIGHTS + 2.1. 
The Product, systems, methods, algorithms, structures, libraries, applications (supplementary software), components/parts of the Product (including texts and fonts), all modules, other elements of the Product copied and/or incorporated in all working software, any copies, documentation, authorship, logos and other information included in the Product shall be objects of intellectual property and commercial secret of the Right Owner, i.e. Arenadata Software LLC and/or its affiliates and shall be protected according to the Russian Federation effective legislation on intellectual property, commercial secret as well as the provisions of this Agreement. + 2.2. The Right Owner shall guarantee that it has relevant rights to use the ARENADATA name and the ARENADATA logo protected under the Russian Federation legislation on copyrights. + 2.3. The use of the Product in violation of the terms and conditions hereof shall be deemed to be a breach of the Russian Federation legislation on copyrights and shall constitute a sufficient ground for depriving the Licensee of the rights granted in respect of the Product. + 2.4. The Right Owner shall assist the Licensee in defending against all claims filed by third parties in respect of the intellectual and industrial ownership of the Product package. Should any such claim be filed the Licensee shall immediately inform the Right Owner about all complaints made by the third party and provide all necessary information regarding the dispute. + 2.5. The Right Owner represents that as of the time of the transfer of the right to use the Product, to the best of its knowledge, there are no third party's rights that could be infringed upon through granting the Licensee with the non-exclusive right to use the Product hereunder. + 2.6. Within the term of this Agreement the Right Owner shall refrain from any actions that can hinder the Licensee from exercising the right to use the result of intellectual activity granted to it within the limits set forth in this Agreement. + +3. SUBJECT MATTER OF THE AGREEMENT + 3.1. The subject matter of this Agreement shall be Right Owner's provision to the Licensee (provided that the Licensee meets all technical requirements described in the technical and user documentation, and all terms and conditions and restrictions set forth herein) of non-exclusive rights to use the Product within the limits and by the methods specified herein. The description of the Product and instructions regarding the use thereof shall be included in the Product suite and may be provided to the Licensee upon request in hard copies. + 3.2. The terms and conditions and the procedure of paying remuneration for provision of the right to use the Product shall be set forth in separate agreements with the Right Owner or its authorized representatives/partners. + +4. SCOPE OF THE RIGHTS TO BE TRANSFERRED AND METHODS OF USE + 4.1. The Licensee shall be provided with the non-exclusive right to use the Product through installing and launching the Product on the relevant number of processors and through copying thereof only for the purpose of generating a back-up or archival copy of the Product. The number of processors shall be determined by separate agreements with the Right Owner or partners of the Right Owner. + 4.2. The right to use the Product granted to the Licensee shall not include: + 4.2.1. The right to use the Product or relevant documentation thereto for any purposes other than those permitted hereby. + 4.2.2. 
The right to modify, hide, delete or enter any changes into the trademarks, trade names, marks or notes made onto the software or constituting an inseparable part of the software or the relevant documentation thereto. While producing above mentioned copies the Licensee shall reproduce onto the copy/copies all information regarding the copyright or other marks made on the software or the relevant documentation thereto. + 4.2.3. The right to modify, supplement, decompile, reverse-engineer, disassemble, translate, adapt, reorganize, make corrections or enter any other changes to the Product, components of the Product or relevant documentation thereto. + 4.2.4. The right to assign the granted right to use the Product, including the media and documentation, to legal entities and individuals through selling, renting, letting, lending or otherwise alienating, including, but not limited to, the provision of the software as a "cloud" service. + 4.2.5. The right to transfer to third parties the activation and access codes (or copies thereof) for the Product. + 4.2.6. The right to use the Product on behalf of any third party. + 4.2.7. The right to make any actions with regard to the Product that violate the Russian and international norms of legislation on copyrights and use of software. + 4.3. The right to use the Product to be granted to the Licensee shall be in effect within the time period set forth in the SW Specifications (Annex No 1 to the Contract) and within the boundaries of the Russian Federation. + 4.4. The use of the Product shall be allowed within the standard SW suite, it shall also be allowed to build the Product into software created anew or existing. + 4.5. It shall be allowed to build in and produce derivative products subject to the rules of application and use of the free Apache SW. + +5. TRANSFER PROCEDURE + 5.1. A copy of the Product shall be furnished to the Licensee on an electronic medium. The user documentation shall be provided to the Licensee on an electronic medium unless set forth otherwise in the contract with the Licensee. + 5.2. Any changes, amendments and other actions related to the transfer, activation of, provision of access to the Product shall be made only by the Right Owner either directly or through its authorized representatives. + 5.2.1. The license agreement for the Product may be re-registered to another Licensee only by the Right Owner on the basis of the current Licensee's written application. The transfer shall be applicable to the entire Product (including all components, media, printed materials and updates). + 5.2.2. The person who obtained the Product as a result of such all-at-once transfer shall accept all terms and conditions of this Agreement, including the obligation not to assign the rights to the Product and this Agreement to any third parties. A written confirmation of acceptance of this condition shall be forwarded to the Right Owner. + 5.2.3. When the right to the Product is assigned to another person (a new Licensee), the Licensee under this Agreement shall delete all back-up copies, if any, of the Product to be transferred. + +6. WARRANTY OBLIGATIONS + 6.1. The Right Owner shall warrant that no copyrights, allied rights or any other rights of third parties are infringed upon through transferring the rights to the Product. + 6.2. 
The Product with the rights to use thereof specified in this Agreement shall be provided "as is" without any obligations on the part of the Right Owner regarding its fitness for the Licensee's purposes or for the use together with certain software. + 6.3. The Right Owner shall provide no warranties with regard to the software and hardware of other manufactures that may be supplied to the Licensee together with the Product or as an integral part of the Product or may be attached to the Product. + 6.4. The Right Owner may provide support services with regard to the Product under separate agreements made by the Licensee with the Right Owner or its partners according to the current terms and conditions of support services of the Right Owner. + +7. LIABILITY + 7.1. The Right Owner and its affiliates shall bear no liability and shall not reimburse any direct or indirect losses, including lost profits, loss of confidential information of the Licensee caused by faults and/or errors made in operating the Product, improper conduct of the personnel of the Licensee or third parties, or breakdowns of technical means and failures of electrical equipment. + 7.2. The Right Owner and its affiliates shall neither be liable for nor make any warranties with regard to any performance characteristics of the Product other than those committed and described in the user documentation, unless the Licensee bought the Product from the Right Owner or its authorized representatives. + 7.3. The Right Owner and its affiliates shall neither make any implicit warranties of the merchantability of the Product or its fitness for a particular purpose. This software shall be provided on an "as is" basis and Arenadata Software LLC shall not be obliged to provide any maintenance, support, updating, extension or modification services with regard to the Product. + 7.4. The Licensee shall bear liability for reimbursing any damages arising out of or caused by the use of the Product and the information contained therein or generated by the Product and resulting from its interaction (or a failure to interact in a proper manner) with any other hardware, array of systems or software provided by the Right Owner and/or third parties. + 7.5. The Licensee shall undertake to compensate the Right Owner for any costs, including legal expenses, attorney fees and hold the Right Owner harmless against any claims, disputes, litigations, losses, damages, expenses, costs, any other liability caused by any unlawful, illegal use of the Product (including the use by any party relating to the Licensee, as well as by persons who were authorized to act on behalf of the Licensee explicitly in violation of this Agreement and effective legislation). + +8. SPECIAL, ADDITIONAL TERMS AND CONDITIONS + 8.1. The rights to use the Product shall be in effect only if the Product is genuine. The Product bought lawfully shall be supplied with the unique identification number and the Licensee's data specified when the rights to use the Product are acquired. The Licensee shall bear full liability for correctness of the data transferred directly to the Right Owner or its authorized representative. The Licensee shall advise the Right Owner or its authorized representative of any deficiencies it finds in the provided data, changes in its address and banking details. + 8.2. 
This Agreement as described above shall be deemed to have been entered into and shall come into legal force from the time of commencement of the installation and use of the Product and shall constitute an entire agreement between the Licensee and the Right Owner regarding its terms and conditions. Should a competent court hold any provisions of this Agreement null and void, unlawful, all other provisions of the Agreement shall remain in force and effect. All disputes and contradictions of the parties to this Agreement shall be settled by means of negotiations, and if the negotiations fail to settle the dispute, such disputes shall be submitted to the Court of Arbitration of Moscow according to the Russian Federation effective legislation. + 8.3. Violation of the terms and conditions of this Agreement shall be subject to a liability according to the Russian Federation effective legislation and this Agreement. Without prejudice to any of its rights the Right Owner shall be entitled to unilaterally terminate this Agreement, should the Licensee fail to observe the terms, conditions and restrictions set forth herein. The money paid by the Licensee for the use of the Product shall not be returned. + 8.4. The Product may include software or any other code distributable subject to the terms and conditions of licenses of third party suppliers. The Customer shall accept and agree with the terms and conditions of such third party licensees applicable to the third party software and shall acknowledge that such third party suppliers disclaim any representations and warranties with regard to the products or any part thereof and assume no liability for any claims that may arise with regard to the products, as a result of the usage thereof by the customer or due to the impossibility to use thereof. Should the Licensee become aware of any information indicating the infringement of the Right Owner's copyright (unlawful resale of the Product, links to pirate websites) please forward such information to: https://support.arenadata.io/. + 8.5. For any additional information regarding the issues relating to this Agreement, any further explanations with regard to the use of the Product the Licensee may apply to the address below: https://support.arenadata.io/. + +9. AGREEMENT TERMINATION + 9.1. The term of this Agreement shall commence from the date of its being signed by the accepting party and shall be in effect for the effective period of the non-exclusive right to use the SW, unless it is terminated due to the Licensee's failure to observe any provision of this Agreement. If any provision of this Agreement is violated, the Right Owner may terminate this Agreement immediately. + 9.2. Upon termination of this Agreement all rights granted to the Licensee by this Agreement shall be forthwith terminated and the Licensee shall immediately return all confidential information to the Right Owner and then delete all confidential information on its side. + 9.3. This clause and sections 2, 3, 4, 6, 7, 8 shall survive any termination of this Agreement. 
\ No newline at end of file diff --git a/tests/integration/bundles/cluster_with_license/config.yaml b/tests/integration/bundles/cluster_with_license/config.yaml new file mode 100644 index 00000000..a8151915 --- /dev/null +++ b/tests/integration/bundles/cluster_with_license/config.yaml @@ -0,0 +1,138 @@ +--- +- type: cluster + name: cluster_with_license + # display_name: Cluster With License + version: &version '2.0' + edition: enterprise + config_group_customization: true + license: ./EULA.txt + upgrade: + - name: upgrade + versions: &correct_versions + min: '1.0' + max: '2.0' + states: &always_available + available: any + + - name: upgrade_via_action_simple + versions: *correct_versions + states: *always_available + scripts: &upgrade_scripts + - name: pre + script: ./playbook.yaml + script_type: ansible + - name: switch + script: bundle_switch + script_type: internal + - name: post + script: ./playbook.yaml + script_type: ansible + + - name: upgrade_via_action_complex + versions: *correct_versions + states: *always_available + scripts: *upgrade_scripts + config: + - name: simple + type: string + required: false + - name: grouped + type: group + subs: + - name: simple + type: integer + default: 4 + - name: second + type: float + default: 4.3 + - name: after + type: list + default: + - "1" + - "woohoo" + hc_acl: + - action: add + service: service_1 + component: component_1 + - action: remove + service: service_1 + component: component_2 + ui_options: + disclaimer: "Cool upgrade" + + - name: unfit_version + versions: + min_strict: '0' + max: '0.3' + states: *always_available + + - name: unfit_state + versions: *correct_versions + states: + available: + - doesnotexist + + config: &config + - name: string + type: string + required: false + default: string + - name: group + type: group + subs: + - name: string + type: string + required: false + default: string + actions: + action: &action + type: job + script: ./playbook.yaml + script_type: ansible + states: + available: any + +- name: service_1 + type: service + version: *version + config: *config + license: ./license.txt + actions: + action: *action + components: + component_1: + constraint: [ 0, + ] + config: *config + actions: + action_1_comp_1: *action + action_2_comp_1: *action + component_2: + constraint: [ 0, + ] + config: *config + actions: + action_1_comp_2: *action + +- name: service_2 + type: service + version: *version + config: *config + actions: + action_1_service_2: *action + action_2_service_2: *action + +- name: service_3_manual_add + type: service + version: *version + config: *config + + +- name: service_with_bound_to + type: service + version: "hehe" + + components: + will_have_bound_to: + description: This component will have `bound_to` constraint after upgrade + bound_to: + service: service_1 + component: component_1 diff --git a/tests/integration/bundles/cluster_with_license/license.txt b/tests/integration/bundles/cluster_with_license/license.txt new file mode 100644 index 00000000..2831d9e6 --- /dev/null +++ b/tests/integration/bundles/cluster_with_license/license.txt @@ -0,0 +1 @@ +License diff --git a/tests/integration/bundles/cluster_with_license/playbook.yaml b/tests/integration/bundles/cluster_with_license/playbook.yaml new file mode 100644 index 00000000..be7ea537 --- /dev/null +++ b/tests/integration/bundles/cluster_with_license/playbook.yaml @@ -0,0 +1,10 @@ +--- +- name: sleep + hosts: all + connection: local + gather_facts: no + + tasks: + - name: sleep + pause: + seconds: 1 diff --git 
a/tests/integration/bundles/complex_cluster/actions.yaml b/tests/integration/bundles/complex_cluster/actions.yaml new file mode 100644 index 00000000..95d47d24 --- /dev/null +++ b/tests/integration/bundles/complex_cluster/actions.yaml @@ -0,0 +1,16 @@ +- name: letsgo + hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: Success + debug: + msg: "successful step" + tags: [ok] + + - name: Fail + fail: + msg: "failed step" + tags: [fail] + diff --git a/tests/integration/bundles/complex_cluster/config.yaml b/tests/integration/bundles/complex_cluster/config.yaml new file mode 100644 index 00000000..041ee101 --- /dev/null +++ b/tests/integration/bundles/complex_cluster/config.yaml @@ -0,0 +1,199 @@ +- type: cluster + name: Some Cluster + version: 1 + + actions: &actions + success: &job + display_name: I will survive + type: job + script_type: ansible + script: ./actions.yaml + allow_to_terminate: true + allow_for_action_host_group: true + params: + ansible_tags: ok + masking: {} + + fail: + <<: *job + display_name: no Way + params: + ansible_tags: fail + + success_task: + display_name: Lots Of me + type: task + masking: {} + allow_to_terminate: true + allow_for_action_host_group: true + scripts: + - &success_job + name: first + script_type: ansible + script: ./actions.yaml + params: + ansible_tags: ok + - <<: *success_job + name: second + display_name: AnothEr + + config: + - name: string_field + type: string + default: "string value" + +- type: service + name: example_1 + display_name: First Example + version: 1.0 + + config: + - name: root_int + display_name: Integer At Root + type: integer + default: 100 + + components: &example_c + first: + display_name: First Component + second: + display_name: Second Component + third_one: + display_name: This Is Different + +- type: service + name: example_2 + version: "4.23.456" + + components: *example_c + +- type: service + name: with_actions + version: 2.3 + actions: *actions + + components: + c1: + display_name: Awesome + actions: *actions + c2: + actions: *actions + +- type: service + name: complex_config + version: 0.3 + config_group_customization: yes + + config: + - &complexity_level + name: complexity_level + display_name: Complexity Level + type: integer + default: 4 + required: no + + - name: very_important_flag + display_name: Set me + type: float + required: true + + - name: cant_find + type: string + default: "cantCme" + ui_options: + invisible: true + + - &country_codes + name: country_codes + type: structure + yspec: ./schema.yaml + default: + - country: Greece + code: 30 + - country: France + code: 33 + - country: Spain + code: 34 + + - &a_lot_of_text + name: a_lot_of_text + display_name: A lot of text + type: group + subs: + - name: cant_find + type: string + default: "cantCme" + ui_options: + invisible: true + - name: simple_string + type: string + required: no + default: "simple string here" + - name: big_text + type: text + required: no + default: "" + - name: license + description: "Edit license as you want :3" + read_only: any + type: file + default: ./text.txt + - name: pass + type: password + required: no + - name: sag + display_name: Group-like structure + type: structure + yspec: ./sag.yaml + required: false + + - &from_doc + name: from_doc + display_name: Examples from documentation + type: group + ui_options: + advanced: true + subs: + - name: memory_size + type: integer + default: 16 + min: 2 + max: 64 + ui_options: + invinsible: true + - name: cluster_host + type: variant + required: false + source: 
+ type: builtin + name: host_in_cluster + - name: secret_mapping + display_name: Map Secrets + type: secretmap + required: no + - name: mount_points + type: list + default: + - /dev/rdisk0s1 + - /dev/rdisk0s2 + - /dev/rdisk0s3 + read_only: any + - name: person + type: map + default: + name: Joe + age: "24" + sex: m + - name: protocol + type: option + option: { http: "80", https: "443" } + default: "80" + - name: agroup + display_name: Optional + type: group + activatable: true + active: false + subs: + - name: justhere + type: integer + required: false diff --git a/tests/integration/bundles/complex_cluster/license.txt b/tests/integration/bundles/complex_cluster/license.txt new file mode 100644 index 00000000..51fca54c --- /dev/null +++ b/tests/integration/bundles/complex_cluster/license.txt @@ -0,0 +1,11 @@ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/tests/integration/bundles/complex_cluster/sag.yaml b/tests/integration/bundles/complex_cluster/sag.yaml new file mode 100644 index 00000000..2420bc5e --- /dev/null +++ b/tests/integration/bundles/complex_cluster/sag.yaml @@ -0,0 +1,19 @@ +root: + match: dict + items: + nested: inner_group + quantity: integer + +inner_group: + match: dict + items: + attr: string + op: string + tech: string + invisible_items: [ "tech" ] + +integer: + match: int + +string: + match: string diff --git a/tests/integration/bundles/complex_cluster/schema.yaml b/tests/integration/bundles/complex_cluster/schema.yaml new file mode 100644 index 00000000..c6cb5736 --- /dev/null +++ b/tests/integration/bundles/complex_cluster/schema.yaml @@ -0,0 +1,17 @@ + +--- +root: + match: list + item: country_code + +country_code: + match: dict + items: + country: string + code: integer + +string: + match: string + +integer: + match: int diff --git a/tests/integration/bundles/complex_cluster/text.txt b/tests/integration/bundles/complex_cluster/text.txt new file mode 100644 index 00000000..51fca54c --- /dev/null +++ b/tests/integration/bundles/complex_cluster/text.txt @@ -0,0 +1,11 @@ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/tests/integration/bundles/complex_provider/config.yaml b/tests/integration/bundles/complex_provider/config.yaml new file mode 100644 index 00000000..cafef698 --- /dev/null +++ b/tests/integration/bundles/complex_provider/config.yaml @@ -0,0 +1,89 @@ +--- +- type: provider + name: complex_provider + version: &version '1.0' + config_group_customization: true + + config: + - name: json + type: json + required: false + default: + key: value + - name: group + type: group + subs: + - name: map + type: map + required: false + default: + integer_key: '10' + string_key: string + - name: activatable_group + type: group + activatable: True + active: True + subs: + - name: secretmap + type: secretmap + required: false + default: + integer_key: '10' + string_key: string + actions: + provider_action: + type: job + script: ./playbook.yaml + script_type: ansible + states: + available: any + +- type: host + name: host + version: *version + config: + - name: structure + type: structure + required: false + yspec: ./schema.yaml + default: + - integer: 1 + string: string1 + - integer: 2 + string: string2 + - name: variant + type: variant + source: + type: config + name: group/list + default: value1 + - name: group + type: group + subs: + - name: list + type: list + required: false + default: + - value1 + - value2 + - value3 + - name: activatable_group + type: group + activatable: True + active: True + subs: + - name: option + type: option + required: false + option: + string1: string1 + string2: string2 + default: string1 + actions: + host_action: + display_name: "host_action" + type: job + script: ./playbook.yaml + script_type: ansible + states: + available: any diff --git a/tests/integration/bundles/complex_provider/playbook.yaml b/tests/integration/bundles/complex_provider/playbook.yaml new file mode 100644 index 00000000..be7ea537 --- /dev/null +++ b/tests/integration/bundles/complex_provider/playbook.yaml @@ -0,0 +1,10 @@ +--- +- name: sleep + hosts: all + connection: local + gather_facts: no + + tasks: + - name: sleep + pause: + seconds: 1 diff --git a/tests/integration/bundles/complex_provider/schema.yaml b/tests/integration/bundles/complex_provider/schema.yaml new file mode 100755 index 00000000..ad629d13 --- /dev/null +++ b/tests/integration/bundles/complex_provider/schema.yaml @@ -0,0 +1,15 @@ +--- +root: + match: list + item: variable +variable: + match: dict + items: + string: string + integer: integer + required_items: + - string +string: + match: string +integer: + match: int diff --git a/tests/integration/bundles/simple_cluster/config.yaml b/tests/integration/bundles/simple_cluster/config.yaml new file mode 100644 index 00000000..d0bb42ce --- /dev/null +++ b/tests/integration/bundles/simple_cluster/config.yaml @@ -0,0 +1,3 @@ +- type: cluster + name: Simple Cluster + version: 2 \ No newline at end of file diff --git a/tests/integration/bundles/simple_hostprovider/actions.yaml b/tests/integration/bundles/simple_hostprovider/actions.yaml new file mode 100644 index 00000000..95d47d24 --- /dev/null +++ b/tests/integration/bundles/simple_hostprovider/actions.yaml @@ -0,0 +1,16 @@ +- name: letsgo + hosts: localhost + connection: local + gather_facts: no + + tasks: + - name: Success + debug: + msg: "successful step" + tags: [ok] + + - name: Fail + fail: + msg: "failed step" + tags: [fail] + diff --git a/tests/integration/bundles/simple_hostprovider/config.yaml b/tests/integration/bundles/simple_hostprovider/config.yaml new file mode 100644 index 00000000..12e0f7f8 --- /dev/null +++ 
b/tests/integration/bundles/simple_hostprovider/config.yaml @@ -0,0 +1,20 @@ +- type: provider + name: simple_provider + version: 4 + + actions: &actions + success: &job + display_name: I will survive + type: job + script_type: ansible + script: ./actions.yaml + params: + ansible_tags: ok + masking: {} + + +- type: host + name: simple_host + version: 2 + + actions: *actions diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 00000000..3aa53fc3 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,83 @@ +from pathlib import Path +from typing import AsyncGenerator, Generator +from urllib.parse import urljoin +import random +import string + +from httpx import AsyncClient +from testcontainers.core.network import Network +import pytest +import pytest_asyncio + +from adcm_aio_client._session import ADCMSession +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.objects.cm import Bundle +from adcm_aio_client.core.types import Credentials +from tests.integration.bundle import pack_bundle +from tests.integration.setup_environment import ( + DB_USER, + ADCMContainer, + ADCMPostgresContainer, + DatabaseInfo, + adcm_image_name, + postgres_image_name, +) + +BUNDLES = Path(__file__).parent / "bundles" + + +@pytest.fixture(scope="session") +def network() -> Generator[Network, None, None]: + with Network() as network: + yield network + + +@pytest.fixture(scope="session") +def postgres(network: Network) -> Generator[ADCMPostgresContainer, None, None]: + with ADCMPostgresContainer(image=postgres_image_name, network=network) as container: + yield container + + +@pytest.fixture(scope="function") +def adcm(network: Network, postgres: ADCMPostgresContainer) -> Generator[ADCMContainer, None, None]: + suffix = "".join(random.sample(string.ascii_letters, k=6)).lower() + db = DatabaseInfo(name=f"adcm_{suffix}", host=postgres.name) + postgres.execute_statement(f"CREATE DATABASE {db.name} OWNER {DB_USER}") + + with ADCMContainer(image=adcm_image_name, network=network, db=db) as container: + yield container + + postgres.execute_statement(f"DROP DATABASE {db.name}") + + +@pytest_asyncio.fixture(scope="function") +async def adcm_client(request: pytest.FixtureRequest, adcm: ADCMContainer) -> AsyncGenerator[ADCMClient, None]: + credentials = Credentials(username="admin", password="admin") # noqa: S106 + url = adcm.url + extra_kwargs = getattr(request, "param", {}) + kwargs: dict = {"timeout": 10, "retry_interval": 1, "retry_attempts": 1} | extra_kwargs + async with ADCMSession(url=url, credentials=credentials, **kwargs) as client: + yield client + + +@pytest_asyncio.fixture() +async def complex_cluster_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_cluster", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def simple_hostprovider_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "simple_hostprovider", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def httpx_client(adcm: ADCMContainer) -> AsyncGenerator[AsyncClient, None]: + client = AsyncClient(base_url=urljoin(adcm.url, "api/v2/")) + response = await client.post("login/", json={"username": "admin", "password": "admin"}) + client.headers["X-CSRFToken"] = response.cookies["csrftoken"] + + yield 
client + + await client.aclose() diff --git a/tests/integration/setup_environment.py b/tests/integration/setup_environment.py new file mode 100644 index 00000000..1b7d5196 --- /dev/null +++ b/tests/integration/setup_environment.py @@ -0,0 +1,98 @@ +from dataclasses import dataclass +from typing import Self +import socket + +from docker.errors import DockerException +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.waiting_utils import wait_container_is_ready, wait_for_logs +from testcontainers.postgres import DbContainer, PostgresContainer + +postgres_image_name = "postgres:latest" +adcm_image_name = "hub.adsw.io/adcm/adcm:develop" +adcm_container_name = "test_adcm" +postgres_name = "test_pg_db" + +# for now runtime relies that those values are always used for their purpose +DB_USER = "adcm" +DB_PASSWORD = "password" # noqa: S105 + + +@dataclass(slots=True) +class DatabaseInfo: + name: str + + host: str + port: int = 5432 + + +def find_free_port(start: int, end: int) -> int: + """Try to find a free port in the given range.""" + for port in range(start, end): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + if s.connect_ex(("127.0.0.1", port)) != 0: # Port is free + return port + raise DockerContainerError(f"No free ports found in the range {start} to {end}") + + +class ADCMPostgresContainer(PostgresContainer): + def __init__(self: Self, image: str, network: Network) -> None: + super().__init__(image) + self.name = postgres_name + self.with_name(self.name) + self.with_network(network) + + def execute_statement(self: Self, statement: str) -> None: + exit_code, out = self.exec(f'psql --username test --dbname test -c "{statement}"') + if exit_code != 0: + output = out.decode("utf-8") + message = f"Failed to execute psql statement: {output}" + raise RuntimeError(message) + + def start(self: Self) -> DbContainer: + super().start() + + wait_container_is_ready(self) + wait_for_logs(self, "database system is ready to accept connections") + + self.execute_statement(f"CREATE USER {DB_USER} WITH ENCRYPTED PASSWORD '{DB_PASSWORD}'") + + return self + + +class ADCMContainer(DockerContainer): + url: str + + def __init__(self: Self, image: str, network: Network, db: DatabaseInfo) -> None: + super().__init__(image) + self._db = db + + self.with_network(network) + + self.with_env("STATISTICS_ENABLED", "0") + self.with_env("DB_USER", DB_USER) + self.with_env("DB_PASS", DB_PASSWORD) + self.with_env("DB_NAME", self._db.name) + self.with_env("DB_HOST", self._db.host) + self.with_env("DB_PORT", str(self._db.port)) + + def start(self: Self) -> Self: + adcm_port = find_free_port(start=8000, end=8080) + self.with_bind_ports(8000, adcm_port) + + self.with_name(f"{adcm_container_name}_{adcm_port}") + + super().start() + + wait_container_is_ready(self) + wait_for_logs(self, "Run Nginx ...") + + ip = self.get_container_host_ip() + port = self.get_exposed_port(8000) + self.url = f"http://{ip}:{port}" + + return self + + +class DockerContainerError(DockerException): + pass diff --git a/tests/integration/test_bundle.py b/tests/integration/test_bundle.py new file mode 100644 index 00000000..29ba52b4 --- /dev/null +++ b/tests/integration/test_bundle.py @@ -0,0 +1,134 @@ +from pathlib import Path +from unittest.mock import AsyncMock +import os + +import pytest +import pytest_asyncio + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.errors import ObjectDoesNotExistError +from 
adcm_aio_client.core.objects.cm import Bundle
+from adcm_aio_client.core.requesters import BundleRetrieverInterface, DefaultRequester
+from tests.integration.bundle import create_bundles_by_template, pack_bundle
+from tests.integration.conftest import BUNDLES
+
+pytestmark = [pytest.mark.asyncio]
+
+
+@pytest_asyncio.fixture()
+async def load_bundles(adcm_client: ADCMClient, tmp_path: Path) -> list[Bundle]:
+    created_bundles = []
+    for folder_path in BUNDLES.iterdir():  # already yields absolute paths
+        if folder_path.is_dir():
+            packed_dir = tmp_path / folder_path.name  # pack into tmp; joining tmp_path with the absolute folder_path would resolve back into the source tree
+            packed_dir.mkdir()
+            bundle_path = pack_bundle(from_dir=folder_path, to=packed_dir)
+            created_bundle = await adcm_client.bundles.create(source=bundle_path, accept_license=False)
+            created_bundles.append(created_bundle)
+
+    return created_bundles
+
+
+async def test_bundle(adcm_client: ADCMClient, load_bundles: list[Bundle], tmp_path: Path) -> None:  # noqa: ARG001
+    await _test_bundle_create_delete(adcm_client, tmp_path)
+    await _test_bundle_properties(adcm_client)
+    await _test_bundle_accessors(adcm_client)
+    await _test_pagination(adcm_client, tmp_path)
+
+
+async def _test_bundle_create_delete(adcm_client: ADCMClient, tmp_path: Path) -> None:
+    bundle = await adcm_client.bundles.get(name__eq="cluster_with_license")
+    assert (await bundle.license).state == "unaccepted"
+    await bundle.delete()
+
+    bundle_path = pack_bundle(from_dir=BUNDLES / "cluster_with_license", to=tmp_path)
+    bundle = await adcm_client.bundles.create(source=bundle_path, accept_license=True)
+
+    assert (await bundle.license).state == "accepted"
+
+    await _test_download_external_bundle_success()
+
+
+async def _test_bundle_accessors(adcm_client: ADCMClient) -> None:
+    bundle = await adcm_client.bundles.get(name__eq="cluster_with_license")
+    assert isinstance(bundle, Bundle)
+    assert bundle.name == "cluster_with_license"
+
+    with pytest.raises(ObjectDoesNotExistError):
+        await adcm_client.bundles.get(name__eq="fake_bundle")
+
+    assert not await adcm_client.bundles.get_or_none(name__eq="fake_bundle")
+    assert isinstance(await adcm_client.bundles.get_or_none(name__contains="cluster_with"), Bundle)
+
+    bundles_list = await adcm_client.bundles.list()
+    bundles_all = await adcm_client.bundles.all()
+    assert isinstance(bundles_list, list)
+    assert len(bundles_all) == len(bundles_list) == len(os.listdir(BUNDLES))
+
+    bundles_list = await adcm_client.bundles.list(query={"limit": 2, "offset": 1})
+    assert isinstance(bundles_list, list)
+    assert len(bundles_list) == 2
+
+    bundles_list = await adcm_client.bundles.list(query={"offset": len(os.listdir(BUNDLES)) + 1})
+    assert isinstance(bundles_list, list)
+    assert len(bundles_list) == 0
+
+    async for b in adcm_client.bundles.iter(name__icontains="cluster"):
+        assert isinstance(b, Bundle)
+        assert "cluster" in b.name.lower()
+
+    assert len(await adcm_client.bundles.filter(name__icontains="cluster")) < len(os.listdir(BUNDLES))
+
+
+async def _test_bundle_properties(adcm_client: ADCMClient) -> None:
+    bundle = await adcm_client.bundles.get(name__eq="cluster_with_license")
+    assert bundle.name == "cluster_with_license"
+    assert (await bundle.license).state == "accepted"
+    assert "LICENSE AGREEMENT" in (await bundle.license).text
+    assert bundle.version == "2.0"
+    assert bundle.signature_status == "absent"
+    assert bundle.edition == "enterprise"
+
+    await (await bundle.license).accept()  # accepting an already accepted license must stay idempotent
+    await bundle.refresh()
+    assert (await bundle.license).state == "accepted"
+
+
+async def _test_download_external_bundle_success() -> None:
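+    # A quick note on mechanics: both collaborators below are AsyncMock stand-ins,
+    # so no network I/O happens here; `bundles.create` only needs the retriever's
+    # `download_external_bundle` to hand back raw bundle bytes for a URL source.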
+ mock_requester = AsyncMock(spec=DefaultRequester) + mock_retriever = AsyncMock(spec=BundleRetrieverInterface) + url = "http://example.com/bundle.tar.gz" + + mock_retriever.download_external_bundle = AsyncMock(return_value=b"bundle content") + + adcm_client = ADCMClient(requester=mock_requester, bundle_retriever=mock_retriever, adcm_version="1.0") + + await adcm_client.bundles.create(source=url, accept_license=False) + + mock_retriever.download_external_bundle.assert_awaited_once_with(url) + + +async def _test_pagination(adcm_client: ADCMClient, tmp_path: Path) -> None: + await create_bundles_by_template( + adcm_client, + tmp_path, + BUNDLES / "simple_hostprovider", + target_name="name", + field_to_modify="simple_provider", + new_value="new_value", + number_of_bundles=55, + ) + bundles_list = await adcm_client.bundles.list() + assert len(bundles_list) == 50 + + bundles_list = await adcm_client.bundles.list(query={"offset": 55}) + assert len(bundles_list) == 6 + + bundles_list = await adcm_client.bundles.list(query={"offset": 61}) + assert len(bundles_list) == 0 + + bundles_list = await adcm_client.bundles.list(query={"limit": 10}) + assert len(bundles_list) == 10 + + assert len(await adcm_client.bundles.all()) == 61 + assert len(await adcm_client.bundles.filter()) == 61 diff --git a/tests/integration/test_cluster.py b/tests/integration/test_cluster.py new file mode 100644 index 00000000..812b89ab --- /dev/null +++ b/tests/integration/test_cluster.py @@ -0,0 +1,233 @@ +from pathlib import Path +from typing import Collection +import asyncio + +from httpx import AsyncClient +import pytest +import pytest_asyncio + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.config import ConfigHistoryNode, ObjectConfig +from adcm_aio_client.core.errors import MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.mapping import ClusterMapping +from adcm_aio_client.core.objects._imports import Imports +from adcm_aio_client.core.objects.cm import Bundle, Cluster +from tests.integration.bundle import pack_bundle +from tests.integration.conftest import BUNDLES + +pytestmark = [pytest.mark.asyncio] + + +async def get_ansible_forks(httpx_client: AsyncClient, cluster: Cluster) -> int: + ansible_cfg_url = f"clusters/{cluster.id}/ansible-config/" + response = await httpx_client.get(ansible_cfg_url) + assert response.status_code == 200 + + return response.json()["config"]["defaults"]["forks"] + + +async def update_cluster_name(httpx_client: AsyncClient, cluster: Cluster, new_name: str) -> None: + cluster_url = f"clusters/{cluster.id}/" + response = await httpx_client.patch(cluster_url, json={"name": new_name}) + assert response.status_code == 200 + + +async def assert_cluster(cluster: Cluster, expected: dict, httpx_client: AsyncClient) -> None: + cluster_url = f"clusters/{cluster.id}/" + response = await httpx_client.get(cluster_url) + assert response.status_code == 200 + + response = response.json() + for attr, value in expected.items(): + assert response[attr] == value + + await cluster.delete() + response = await httpx_client.get(cluster_url) + assert response.status_code == 404 + + +def assert_clusters_collection(clusters: Collection[Cluster], expected_amount: int) -> None: + assert all(isinstance(cluster, Cluster) for cluster in clusters) + assert len({cluster.id for cluster in clusters}) == expected_amount + assert len({id(cluster) for cluster in clusters}) == expected_amount + + +@pytest_asyncio.fixture() +async def complex_cluster_bundle(adcm_client: 
ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_cluster", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def many_complex_clusters(adcm_client: ADCMClient, complex_cluster_bundle: Bundle) -> int: + """ + Creates 51 clusters (2 pages, if response's page size is 50) + with name pattern `Test-cluster-N` and one `Very special cluster` + """ + + num_similar_clusters = 50 + coros = ( + adcm_client.clusters.create(bundle=complex_cluster_bundle, name=f"Test-cluster-{i + 1}") + for i in range(num_similar_clusters) + ) + special_cluster_coro = adcm_client.clusters.create(bundle=complex_cluster_bundle, name="Very special cluster") + await asyncio.gather(*coros, special_cluster_coro) + + return num_similar_clusters + 1 + + +@pytest_asyncio.fixture() +async def simple_cluster_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "simple_cluster", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def simple_cluster(adcm_client: ADCMClient, simple_cluster_bundle: Bundle) -> Cluster: + return await adcm_client.clusters.create(bundle=simple_cluster_bundle, name="Simple cluster") + + +async def test_cluster( + adcm_client: ADCMClient, + complex_cluster_bundle: Bundle, + many_complex_clusters: int, + simple_cluster_bundle: Bundle, + simple_cluster: Cluster, # for filtering by bundle + httpx_client: AsyncClient, +) -> None: + _ = simple_cluster + num_clusters = many_complex_clusters + 1 # + simple_cluster + + await _test_cluster_create_delete_api( + adcm_client=adcm_client, bundle=complex_cluster_bundle, httpx_client=httpx_client + ) + + await _test_clusters_node( + adcm_client=adcm_client, + complex_bundle=complex_cluster_bundle, + num_clusters=num_clusters, + simple_bundle=simple_cluster_bundle, + ) + + cluster = await adcm_client.clusters.get(name__eq="Very special cluster") + await _test_cluster_object_api(httpx_client=httpx_client, cluster=cluster, cluster_bundle=complex_cluster_bundle) + + +async def _test_cluster_create_delete_api(adcm_client: ADCMClient, bundle: Bundle, httpx_client: AsyncClient) -> None: + name = "Test-cluster" + description = "des\ncription" + cluster = await adcm_client.clusters.create(bundle=bundle, name=name, description=description) + + expected = {"id": cluster.id, "name": name, "description": description} + await assert_cluster(cluster, expected, httpx_client) + + # without optional arguments + name = "Another-test-cluster" + cluster = await adcm_client.clusters.create(bundle=bundle, name=name) + + expected = {"id": cluster.id, "name": name, "description": ""} + await assert_cluster(cluster, expected, httpx_client) + + +async def _test_clusters_node( + adcm_client: ADCMClient, complex_bundle: Bundle, num_clusters: int, simple_bundle: Bundle +) -> None: + no_objects_msg = "^No objects found with the given filter.$" + multiple_objects_msg = "^More than one object found.$" + + # get + assert isinstance(await adcm_client.clusters.get(name__eq="Very special cluster"), Cluster) + + with pytest.raises(ObjectDoesNotExistError, match=no_objects_msg): + await adcm_client.clusters.get(name__eq="Not so special cluster") + + with pytest.raises(MultipleObjectsReturnedError, match=multiple_objects_msg): + await adcm_client.clusters.get(name__in=["Test-cluster-1", "Test-cluster-2"]) + + # get_or_none + assert isinstance(await 
adcm_client.clusters.get_or_none(name__eq="Test-cluster-3"), Cluster) + + assert await adcm_client.clusters.get_or_none(name__eq="Not so special cluster") is None + + with pytest.raises(MultipleObjectsReturnedError, match=multiple_objects_msg): + await adcm_client.clusters.get_or_none(name__in=["Very special cluster", "Test-cluster-2"]) + + # all + all_clusters = await adcm_client.clusters.all() + assert_clusters_collection(clusters=all_clusters, expected_amount=num_clusters) + + # list + page_size = 50 + assert page_size < num_clusters, "check page_size or number of clusters" + + first_page_clusters = await adcm_client.clusters.list() + assert_clusters_collection(clusters=first_page_clusters, expected_amount=page_size) + + # iter + iter_clusters = set() + async for cluster in adcm_client.clusters.iter(): + iter_clusters.add(cluster) + assert_clusters_collection(clusters=iter_clusters, expected_amount=num_clusters) + + # filter + # complex_bundle: "Test-cluster-N" - 50; "Very special cluster" - 1; + # simple_bundle: "Simple cluster" - 1 + filters_data = { + ("bundle__eq", simple_bundle): 1, + ("bundle__in", (complex_bundle, simple_bundle)): num_clusters, + ("bundle__ne", complex_bundle): 1, + ("bundle__exclude", (simple_bundle, complex_bundle)): 0, + ("name__eq", "Very special cluster"): 1, + ("name__ieq", "VERY SPECIAL cluster"): 1, + ("name__ne", "Simple cluster"): num_clusters - 1, + ("name__ine", "SIMPLE CLUSTER"): num_clusters - 1, + ("name__in", ("Test-cluster-1", "Test-cluster-2", "TEST-cluster-3", "Not a cluster")): 2, + ("name__iin", ("TEST-cluster-1", "Test-CLUSTER-2", "SIMPLE CLUSTER")): 3, + ("name__exclude", ("Test-cluster-1", "Test-cluster-2", "Not a cluster")): num_clusters - 2, + ("name__iexclude", ("VERY special CLUSTER", "Not a cluster")): num_clusters - 1, + ("name__contains", "special"): 1, + ("name__icontains", "-ClUsTeR-"): num_clusters - 2, + ("status__eq", "up"): 0, + ("status__eq", "down"): num_clusters, + ("status__in", ("down", "some status")): num_clusters, + ("status__in", ("up", "some status")): 0, + ("status__ne", "down"): 0, + ("status__ne", "up"): num_clusters, + ("status__exclude", ("excluded_status", "down")): 0, + ("status__exclude", ("excluded_status", "up")): num_clusters, + ("status__exclude", ("up", "down")): 0, + } + for filter_, expected in filters_data.items(): + filter_value = {filter_[0]: filter_[1]} + clusters = await adcm_client.clusters.filter(**filter_value) + assert len(clusters) == expected, f"Filter: {filter_value}" + + +async def _test_cluster_object_api(httpx_client: AsyncClient, cluster: Cluster, cluster_bundle: Bundle) -> None: + assert isinstance(cluster.id, int) + assert isinstance(cluster.name, str) + assert isinstance(cluster.description, str) + + bundle = await cluster.bundle + assert isinstance(bundle, Bundle) + assert bundle.id == cluster_bundle.id + + assert isinstance(await cluster.get_status(), str) + assert isinstance(await cluster.actions.all(), list) + assert isinstance(await cluster.upgrades.all(), list) + assert isinstance(await cluster.config_host_groups.all(), list) + assert isinstance(await cluster.action_host_groups.all(), list) + assert isinstance(await cluster.config, ObjectConfig) + assert isinstance(cluster.config_history, ConfigHistoryNode) + assert isinstance(await cluster.mapping, ClusterMapping) + assert isinstance(await cluster.imports, Imports) + + initial_ansible_forks = await get_ansible_forks(httpx_client, cluster) + await cluster.set_ansible_forks(value=initial_ansible_forks + 5) + assert await 
get_ansible_forks(httpx_client, cluster) == initial_ansible_forks + 5 + + new_name = "New cluster name" + await update_cluster_name(httpx_client, cluster, new_name) + assert cluster.name != new_name + await cluster.refresh() + assert cluster.name == new_name diff --git a/tests/integration/test_component.py b/tests/integration/test_component.py new file mode 100644 index 00000000..ed76d5e2 --- /dev/null +++ b/tests/integration/test_component.py @@ -0,0 +1,167 @@ +from pathlib import Path +from typing import Collection +import random +import string + +import pytest +import pytest_asyncio + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.config import ConfigHistoryNode, ObjectConfig +from adcm_aio_client.core.errors import MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.filters import Filter +from adcm_aio_client.core.objects.cm import Cluster, Component, Service +from tests.integration.bundle import pack_bundle +from tests.integration.yaml import create_yaml + +pytestmark = [pytest.mark.asyncio] + + +def prepare_bundle_data() -> list[dict]: + config = [{"name": "string_field", "type": "string", "default": "string_field value"}] + + component_data = {"config": config} + fifty_components = {f"generated_component_{i + 1}": component_data for i in range(50)} + special_component = {"special_component": component_data} + + return [ + { + "type": "cluster", + "name": "Generated cluster", + "version": 1, + }, + { + "type": "service", + "name": "Service", + "version": 1.0, + "config": config, + "components": {**fifty_components, **special_component}, + }, + ] + + +def assert_components_collection(components: Collection[Component], expected_amount: int) -> None: + assert all(isinstance(component, Component) for component in components) + assert len({component.id for component in components}) == expected_amount + assert len({id(component) for component in components}) == expected_amount + + +@pytest_asyncio.fixture() +async def service_51_components(adcm_client: ADCMClient, tmp_path: Path) -> Service: + config_yaml_path = tmp_path / "".join(random.sample(string.ascii_letters, k=6)).lower() / "config.yaml" + create_yaml(data=prepare_bundle_data(), path=config_yaml_path) + + bundle_path = pack_bundle(from_dir=config_yaml_path.parent, to=tmp_path) + bundle = await adcm_client.bundles.create(source=bundle_path) + cluster = await adcm_client.clusters.create(bundle=bundle, name="Test cluster 52") + + return (await cluster.services.add(filter_=Filter(attr="name", op="eq", value="Service")))[0] + + +async def test_component_api(service_51_components: Service) -> None: + service = service_51_components + num_components = 51 + + await _test_component_node(service=service, num_components=num_components) + + component = await service.components.get(name__eq="special_component") + await _test_component_object_api(component=component, parent_service=service) + + +async def _test_component_node(service: Service, num_components: int) -> None: + no_objects_msg = "^No objects found with the given filter.$" + multiple_objects_msg = "^More than one object found.$" + + # get + assert isinstance(await service.components.get(name__eq="special_component"), Component) + + with pytest.raises(ObjectDoesNotExistError, match=no_objects_msg): + await service.components.get(name__eq="some_component") + + with pytest.raises(MultipleObjectsReturnedError, match=multiple_objects_msg): + await service.components.get(name__in=["generated_component_1", 
"generated_component_2"]) + + # get_or_none + assert isinstance(await service.components.get_or_none(name__eq="generated_component_30"), Component) + + assert await service.components.get_or_none(name__eq="some_component") is None + + with pytest.raises(MultipleObjectsReturnedError, match=multiple_objects_msg): + await service.components.get_or_none(name__in=["generated_component_1", "generated_component_11"]) + + # all + all_components = await service.components.all() + assert_components_collection(components=all_components, expected_amount=num_components) + + # list + page_size = 50 + assert page_size < num_components, "check page_size or number of components" + + first_page_components = await service.components.list() + assert_components_collection(components=first_page_components, expected_amount=page_size) + + # iter + iter_components = set() + async for component in service.components.iter(): + iter_components.add(component) + assert_components_collection(components=iter_components, expected_amount=num_components) + + # filter + name_filters_data = { + ("name__eq", "generated_component_8"): 1, + ("name__ieq", "gEnErAtEd_CoMpOnEnT_18"): 1, + ("name__ne", "generated_component_2"): num_components - 1, + ("name__ine", "GENERATED_component_2"): num_components - 1, + ( + "name__in", + ("generated_component_20", "generated_component_21", "GENERATED_COMPONENT_22", "Not a component"), + ): 2, + ( + "name__iin", + ("generated_component_20", "generated_component_21", "GENERATED_COMPONENT_22", "Not a component"), + ): 3, + ("name__exclude", ("generated_component_20", "generated_component_21", "Not a component")): num_components - 2, + ("name__iexclude", ("GENERATED_COMPONENT_22", "Not a component")): num_components - 1, + ("name__contains", "38"): 1, + ("name__contains", "omponen"): num_components, + ("name__icontains", "_coMPON"): num_components, + } + display_name_filters_data = { # display_names are the same as names + (f"display_{filter_[0]}", filter_[1]): expected for filter_, expected in name_filters_data.items() + } + + filters_data = { + **name_filters_data, + **display_name_filters_data, + ("status__eq", "up"): 0, + ("status__eq", "down"): num_components, + ("status__in", ("down", "some status")): num_components, + ("status__in", ("up", "some status")): 0, + ("status__ne", "down"): 0, + ("status__ne", "up"): num_components, + ("status__exclude", ("excluded_status", "down")): 0, + ("status__exclude", ("excluded_status", "up")): num_components, + ("status__exclude", ("up", "down")): 0, + } + for filter_, expected in filters_data.items(): + filter_value = {filter_[0]: filter_[1]} + components = await service.components.filter(**filter_value) + assert len(components) == expected, f"Filter: {filter_value}" + + +async def _test_component_object_api(component: Component, parent_service: Service) -> None: + assert isinstance(component.id, int) + assert isinstance(component.name, str) + assert isinstance(component.display_name, str) + assert isinstance(await component.constraint, list) + assert isinstance(component.service, Service) + assert isinstance(component.cluster, Cluster) + assert component.service.id == parent_service.id + assert component.cluster.id == parent_service.cluster.id + assert isinstance(await component.hosts.all(), list) + assert isinstance(await component.get_status(), str) + assert isinstance(await component.actions.all(), list) + assert isinstance(await component.config, ObjectConfig) + assert isinstance(component.config_history, ConfigHistoryNode) + assert 
isinstance(await component.config_host_groups.all(), list) + assert isinstance(await component.action_host_groups.all(), list) diff --git a/tests/integration/test_config.py b/tests/integration/test_config.py new file mode 100644 index 00000000..c4d24220 --- /dev/null +++ b/tests/integration/test_config.py @@ -0,0 +1,211 @@ +from pathlib import Path + +import pytest +import pytest_asyncio + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.config import ActivatableParameterGroup, Parameter, ParameterGroup +from adcm_aio_client.core.config.refresh import apply_local_changes, apply_remote_changes +from adcm_aio_client.core.errors import ConfigNoParameterError +from adcm_aio_client.core.filters import Filter +from adcm_aio_client.core.objects.cm import Bundle, Cluster, Service +from tests.integration.bundle import pack_bundle +from tests.integration.conftest import BUNDLES + +pytestmark = [pytest.mark.asyncio] + + +@pytest_asyncio.fixture() +async def cluster_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_cluster", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def cluster(adcm_client: ADCMClient, cluster_bundle: Bundle) -> Cluster: + cluster = await adcm_client.clusters.create(bundle=cluster_bundle, name="Awesome Cluster") + await cluster.services.add(filter_=Filter(attr="name", op="eq", value="complex_config")) + return cluster + + +async def get_service_with_config(cluster: Cluster) -> Service: + return await cluster.services.get(name__eq="complex_config") + + +async def test_invisible_fields(cluster: Cluster) -> None: + expected_error = ConfigNoParameterError + + service = await get_service_with_config(cluster) + config = await service.config + + # invisible fields can't be found via `__getitem__` interface + + with pytest.raises(expected_error): + config["cant_find"] + + group = config["A lot of text", ParameterGroup] + with pytest.raises(expected_error): + group["cantCme"] + + # non initialized structure-based group + structure_group = group["sag", ParameterGroup] + inner_group = structure_group["nested", ParameterGroup] + with pytest.raises(expected_error): + inner_group["tech"] + + # they aren't displayed in difference + + # this change uses "internal" implementation + # and isn't supposed to be used in production code + data = config.data._values + data["very_important_flag"] = 2 + data["cant_find"] = "changed value" + data["a_lot_of_text"]["cant_find"] = "also changed" + + await config.save() + + first_config = await service.config_history[0] + second_config = await service.config_history[-1] + + diff = first_config.difference(second_config) + assert len(diff._values) == 1 + assert ("very_important_flag",) in diff._values + assert first_config.data._values["cant_find"] != second_config.data._values["cant_find"] + + +async def test_structure_groups(cluster: Cluster) -> None: + service = await get_service_with_config(cluster) + config = await service.config + group = config["A lot of text"] + assert isinstance(group, ParameterGroup) + group_like = group["Group-like structure"] + # structure with "dict" root is a group + assert isinstance(group_like, ParameterGroup) + assert isinstance(group_like["quantity"], Parameter) + nested_group = group_like["nested"] + assert isinstance(nested_group, ParameterGroup) + nested_group["attr", Parameter].set("something") + nested_group["op", Parameter].set("good") + + 
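+
+# A minimal illustrative sketch (not used by any test below; the names come from
+# the "complex_config" service of the bundle above): tests in this module rely on
+# the two-element subscript convention, where indexing a config with a (name, type)
+# pair narrows the static type and asserts the runtime kind of the parameter.
+async def _example_config_edit(service: Service) -> None:
+    config = await service.config
+    group = config["A lot of text", ParameterGroup]
+    group["big_text", Parameter].set("illustrative value")  # stage a local change
+    config["agroup", ActivatableParameterGroup].activate()  # toggle an activatable group
+    await config.save()  # persist staged changes as a new config version
+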
+async def test_config(cluster: Cluster) -> None:
+    # save two configs for later refresh usage
+    service = await get_service_with_config(cluster)
+    config_1 = await service.config_history.current()
+    config_2 = await service.config_history.current()
+
+    # change and save
+
+    config = await service.config
+
+    required_value = 100
+    codes_value = [{"country": "Unknown", "code": 32}]
+    multiline_value = "A lot of text\nOn multiple lines\n\tAnd it's perfectly fine\n"
+    secret_map_value = {"pass1": "verysecret", "pass2": "evenmoresecret"}
+
+    field = config["Set me"]
+    assert isinstance(field, Parameter)
+    assert field.value is None
+    field.set(required_value)
+    assert field.value == required_value
+    assert field.value == config["very_important_flag", Parameter].value
+
+    field = config["country_codes"]
+    # structure with "list" root is a parameter
+    assert isinstance(field, Parameter)
+    assert isinstance(field.value, list)
+    assert all(isinstance(e, dict) for e in field.value)
+    field.set(codes_value)
+
+    group = config["A lot of text"]
+    assert isinstance(group, ParameterGroup)
+
+    field = group["big_text"]
+    assert isinstance(field, Parameter)
+    assert field.value is None
+    field.set(multiline_value)
+    assert field.value == multiline_value
+
+    field = config["from_doc", ParameterGroup]["Map Secrets"]
+    assert isinstance(field, Parameter)
+    assert field.value is None
+    field.set(secret_map_value)
+
+    config["agroup", ActivatableParameterGroup].activate()
+
+    pre_save_id = config.id
+
+    await config.save()
+
+    assert config.id != pre_save_id
+    assert config_1.id == pre_save_id
+    assert config_2.id == pre_save_id
+
+    # check that values are updated; secret values come back from the server encrypted
+    field = config["from_doc", ParameterGroup]["Map Secrets"]
+    assert field.value.keys() == secret_map_value.keys()  # type: ignore
+    assert field.value.values() != secret_map_value.values()  # type: ignore
+
+    # refresh
+
+    non_conflicting_value_1 = 200
+    non_conflicting_value_2 = "megapass"
+    conflict_value_1 = "very fun\n"
+    conflict_value_2 = 43.2
+
+    for config_ in (config_1, config_2):
+        config_["Complexity Level", Parameter].set(non_conflicting_value_1)
+        group_ = config_["a_lot_of_text", ParameterGroup]
+        group_["pass", Parameter].set(non_conflicting_value_2)
+        group_["big_text", Parameter].set(conflict_value_1)
+        config_["Set me", Parameter].set(conflict_value_2)
+
+    await config_1.refresh(strategy=apply_local_changes)
+
+    config_ = config_1
+    assert config_.id == config.id
+    assert config_["Complexity Level", Parameter].value == non_conflicting_value_1
+    assert config_["Set me", Parameter].value == conflict_value_2
+    group_ = config_["a_lot_of_text", ParameterGroup]
+    assert group_["pass", Parameter].value == non_conflicting_value_2
+    assert group_["big_text", Parameter].value == conflict_value_1
+    secret_map = config_["from_doc", ParameterGroup]["Map Secrets", Parameter]
+    assert isinstance(secret_map.value, dict)
+    assert secret_map.value.keys() == secret_map_value.keys()
+    assert config_["country_codes", Parameter].value == codes_value
+
+    await config_2.refresh(strategy=apply_remote_changes)
+
+    config_ = config_2
+    assert config_.id == config.id
+    assert config_["Complexity Level", Parameter].value == non_conflicting_value_1
+    assert config_["Set me", Parameter].value == required_value
+    group_ = config_["a_lot_of_text", ParameterGroup]
+    assert group_["pass", Parameter].value == non_conflicting_value_2
+    assert group_["big_text", Parameter].value == multiline_value
+    
secret_map = config_["from_doc", ParameterGroup]["Map Secrets", Parameter] + assert isinstance(secret_map.value, dict) + assert secret_map.value.keys() == secret_map_value.keys() + assert config_["country_codes", Parameter].value == codes_value + + # history + + config_1["agroup", ActivatableParameterGroup].deactivate() + + await config_1.save() + + assert config_1.id != config.id + + latest_config = await service.config_history[-1] + earliest_config = await service.config_history[0] + + assert latest_config.id == config_1.id + assert earliest_config.id == pre_save_id + + diff = latest_config.difference(earliest_config) + # group was activated, then deactivated, so returned to initial state + # => no diff + assert len(diff._attributes) == 0 + # field values changed from earliest to latest + assert len(diff._values) == 6 diff --git a/tests/integration/test_host.py b/tests/integration/test_host.py new file mode 100644 index 00000000..47908317 --- /dev/null +++ b/tests/integration/test_host.py @@ -0,0 +1,138 @@ +from pathlib import Path + +import pytest +import pytest_asyncio + +from adcm_aio_client.core.actions import ActionsAccessor +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.errors import MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.filters import Filter +from adcm_aio_client.core.objects.cm import ( + Bundle, + Cluster, + Host, + HostProvider, +) +from tests.integration.bundle import pack_bundle +from tests.integration.conftest import BUNDLES + +pytestmark = [pytest.mark.asyncio] + + +@pytest_asyncio.fixture() +async def cluster_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_cluster", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path) + + +@pytest_asyncio.fixture() +async def cluster(adcm_client: ADCMClient, cluster_bundle: Bundle) -> Cluster: + return await adcm_client.clusters.create(bundle=cluster_bundle, name="Cluster", description="Cluster description") + + +@pytest_asyncio.fixture() +async def hostprovider_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_provider", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path) + + +@pytest_asyncio.fixture() +async def hostprovider(adcm_client: ADCMClient, hostprovider_bundle: Bundle) -> HostProvider: + return await adcm_client.hostproviders.create( + bundle=hostprovider_bundle, name="Hostprovider name", description="Hostprovider description" + ) + + +async def test_host(adcm_client: ADCMClient, hostprovider: HostProvider, cluster: Cluster) -> None: + await _test_host_properties(adcm_client, hostprovider, cluster) + await _test_host_accessors(adcm_client, hostprovider, cluster) + await _test_pagination(adcm_client, hostprovider, cluster) + + +async def _test_host_properties(adcm_client: ADCMClient, hostprovider: HostProvider, cluster: Cluster) -> None: + await adcm_client.hosts.create(name="test-host", description="host description", hostprovider=hostprovider) + await cluster.hosts.add(host=await adcm_client.hosts.get(name__eq="test-host")) + + host = await adcm_client.hosts.get() + assert host.name == "test-host" + assert (await host.hostprovider).name == hostprovider.name + assert (await host.cluster).name == cluster.name # pyright: ignore[reportOptionalMemberAccess] + assert isinstance(host.actions, ActionsAccessor) + assert await host.get_status() == "down" + assert (await 
host.maintenance_mode).value == "off" + + +async def _test_host_accessors(adcm_client: ADCMClient, hostprovider: HostProvider, cluster: Cluster) -> None: + for new_host in ["host-1", "host-2", "host-3"]: + await adcm_client.hosts.create(name=new_host, description="host description", hostprovider=hostprovider) + + host = await adcm_client.hosts.get(name__eq="host-1") + assert isinstance(host, Host) + assert host.name == "host-1" + + with pytest.raises(ObjectDoesNotExistError): + await adcm_client.hosts.get(name__eq="fake_host") + + with pytest.raises(MultipleObjectsReturnedError): + await adcm_client.hosts.get(name__contains="host") + + assert not await adcm_client.hosts.get_or_none(name__eq="fake_host") + assert isinstance(await adcm_client.hosts.get_or_none(name__contains="-1"), Host) + + assert len(await adcm_client.hosts.all()) == len(await adcm_client.hosts.list()) == 4 + + hosts_list = await adcm_client.hosts.list(query={"limit": 2, "offset": 1}) + assert isinstance(hosts_list, list) + assert len(hosts_list) == 2 + + hosts_list = await adcm_client.hosts.list(query={"offset": 4}) + assert isinstance(hosts_list, list) + assert len(hosts_list) == 0 + + async for h in adcm_client.hosts.iter(): + assert isinstance(h, Host) + assert "host" in h.name + + await cluster.hosts.add(host=await adcm_client.hosts.get(name__eq="host-1")) + await cluster.hosts.add(host=Filter(attr="name", op="eq", value="host-2")) + + assert len(await cluster.hosts.all()) == 3 + + await cluster.hosts.remove(host=await adcm_client.hosts.get(name__eq="host-1")) + + assert len(await cluster.hosts.all()) == 2 + + host = await adcm_client.hosts.get(name__icontains="T-1") + await host.delete() + + +async def _test_pagination(adcm_client: ADCMClient, hostprovider: HostProvider, cluster: Cluster) -> None: + for i in range(55): + await adcm_client.hosts.create( + hostprovider=hostprovider, + cluster=cluster, + name=f"hostname-{i}", + ) + + hosts_list = await adcm_client.hosts.list() + cluster_hosts_list = await cluster.hosts.list() + assert len(hosts_list) == len(cluster_hosts_list) == 50 + + hosts_list = await adcm_client.hosts.list(query={"offset": 55}) + cluster_hosts_list = await cluster.hosts.list(query={"offset": 55}) + assert len(hosts_list) == 3 + assert len(cluster_hosts_list) == 2 + + hosts_list = await adcm_client.hosts.list(query={"offset": 60}) + cluster_hosts_list = await cluster.hosts.list(query={"offset": 60}) + assert len(hosts_list) == len(cluster_hosts_list) == 0 + + hosts_list = await adcm_client.hosts.list(query={"limit": 10}) + cluster_hosts_list = await cluster.hosts.list(query={"limit": 10}) + assert len(hosts_list) == len(cluster_hosts_list) == 10 + + assert len(await adcm_client.hosts.all()) == 58 + assert len(await cluster.hosts.all()) == 57 + + assert len(await adcm_client.hosts.filter()) == 58 + assert len(await cluster.hosts.filter()) == 57 diff --git a/tests/integration/test_hostprovider.py b/tests/integration/test_hostprovider.py new file mode 100644 index 00000000..bab65fcb --- /dev/null +++ b/tests/integration/test_hostprovider.py @@ -0,0 +1,102 @@ +from pathlib import Path + +import pytest +import pytest_asyncio + +from adcm_aio_client.core.actions import ActionsAccessor, UpgradeNode +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.config import ( + ConfigHistoryNode, + ObjectConfig, +) +from adcm_aio_client.core.errors import MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.objects.cm import Bundle, HostProvider +from 
tests.integration.bundle import pack_bundle +from tests.integration.conftest import BUNDLES + +pytestmark = [pytest.mark.asyncio] + + +@pytest_asyncio.fixture() +async def hostprovider_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_provider", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path) + + +async def test_hostprovider(adcm_client: ADCMClient, hostprovider_bundle: Bundle) -> None: + await _test_hostprovider_properties(adcm_client, hostprovider_bundle) + await _test_hostprovider_accessors(adcm_client, hostprovider_bundle) + await _test_pagination(adcm_client, hostprovider_bundle) + + +async def _test_hostprovider_properties(adcm_client: ADCMClient, hostprovider_bundle: Bundle) -> None: + hostprovider = await adcm_client.hostproviders.create( + bundle=hostprovider_bundle, name="Hostprovider name", description="Hostprovider description" + ) + assert hostprovider.display_name == "complex_provider" + assert hostprovider.name == "Hostprovider name" + assert hostprovider.description == "Hostprovider description" + assert isinstance(hostprovider.actions, ActionsAccessor) + assert isinstance(await hostprovider.config, ObjectConfig) + assert isinstance(hostprovider.config_history, ConfigHistoryNode) + assert isinstance(hostprovider.upgrades, UpgradeNode) + hosts = await hostprovider.hosts.all() + assert len(hosts) == 0 + + +async def _test_hostprovider_accessors(adcm_client: ADCMClient, hostprovider_bundle: Bundle) -> None: + for new_host_provider in ["hostprovider-1", "hostprovider-2", "hostprovider-3"]: + await adcm_client.hostproviders.create( + bundle=hostprovider_bundle, name=new_host_provider, description=new_host_provider + ) + + hostprovider = await adcm_client.hostproviders.get(name__eq="hostprovider-1") + assert isinstance(hostprovider, HostProvider) + assert hostprovider.name == "hostprovider-1" + + with pytest.raises(ObjectDoesNotExistError): + await adcm_client.hostproviders.get(name__eq="fake_hostprovider") + + with pytest.raises(MultipleObjectsReturnedError): + await adcm_client.hostproviders.get(name__icontains="pr") + + assert not await adcm_client.hostproviders.get_or_none(name__eq="fake_hostprovider") + assert isinstance(await adcm_client.hostproviders.get_or_none(name__contains="hostprovider-1"), HostProvider) + + assert len(await adcm_client.hostproviders.all()) == len(await adcm_client.hostproviders.list()) == 4 + + hostproviders_list = await adcm_client.hostproviders.list(query={"limit": 2, "offset": 1}) + assert isinstance(hostproviders_list, list) + assert len(hostproviders_list) == 2 + + hostproviders_list = await adcm_client.hostproviders.list(query={"offset": 4}) + assert isinstance(hostproviders_list, list) + assert len(hostproviders_list) == 0 + + async for hp in adcm_client.hostproviders.iter(): + assert isinstance(hp, HostProvider) + assert "hostprovider" in hp.name.lower() + + assert len(await adcm_client.hostproviders.filter(bundle__eq=hostprovider_bundle)) == 4 + + await hostprovider.delete() + + +async def _test_pagination(adcm_client: ADCMClient, bundle: Bundle) -> None: + for i in range(55): + await adcm_client.hostproviders.create(bundle=bundle, name=f"Hostprovider name {i}") + + hostproviders_list = await adcm_client.hostproviders.list() + assert len(hostproviders_list) == 50 + + hostproviders_list = await adcm_client.hostproviders.list(query={"offset": 55}) + assert len(hostproviders_list) == 3 + + hostproviders_list = await 
adcm_client.hostproviders.list(query={"offset": 60})
+    assert len(hostproviders_list) == 0
+
+    hostproviders_list = await adcm_client.hostproviders.list(query={"limit": 10})
+    assert len(hostproviders_list) == 10
+
+    assert len(await adcm_client.hostproviders.all()) == 58
+    assert len(await adcm_client.hostproviders.filter()) == 58
diff --git a/tests/integration/test_jobs.py b/tests/integration/test_jobs.py
new file mode 100644
index 00000000..4d1f0916
--- /dev/null
+++ b/tests/integration/test_jobs.py
@@ -0,0 +1,221 @@
+from datetime import datetime
+from itertools import chain
+from operator import attrgetter
+import asyncio
+
+import pytest
+import pytest_asyncio
+
+from adcm_aio_client.core.client import ADCMClient
+from adcm_aio_client.core.filters import Filter, FilterValue
+from adcm_aio_client.core.host_groups.action_group import ActionHostGroup
+from adcm_aio_client.core.objects._common import WithActions
+from adcm_aio_client.core.objects.cm import Bundle, Cluster, Component, Job
+from adcm_aio_client.core.types import WithID
+
+pytestmark = [pytest.mark.asyncio]
+
+
+async def is_running(job: Job) -> bool:
+    return await job.get_status() == "running"
+
+
+async def run_non_blocking(target: WithActions, **filters: FilterValue) -> Job:
+    action = await target.actions.get(**filters)
+    action.blocking = False
+    return await action.run()
+
+
+async def check_job_object(job: Job, object_: WithID) -> None:
+    expected_type = object_.__class__
+    expected_id = object_.id
+
+    actual_object = await job.object
+
+    assert isinstance(actual_object, expected_type)
+    assert actual_object.id == expected_id
+
+
+@pytest_asyncio.fixture()
+async def prepare_environment(
+    adcm_client: ADCMClient,
+    complex_cluster_bundle: Bundle,
+    simple_hostprovider_bundle: Bundle,
+) -> None:
+    cluster_bundle = complex_cluster_bundle
+    hostprovider_bundle = simple_hostprovider_bundle
+
+    clusters: list[Cluster] = await asyncio.gather(
+        *(adcm_client.clusters.create(cluster_bundle, f"wow-{i}") for i in range(5))
+    )
+    hostproviders = await asyncio.gather(
+        *(adcm_client.hostproviders.create(hostprovider_bundle, f"yay-{i}") for i in range(5))
+    )
+    await asyncio.gather(
+        *(adcm_client.hosts.create(hp, f"host-{hp.name}-{i}") for i in range(5) for hp in hostproviders)
+    )
+    hosts = await adcm_client.hosts.all()
+
+    services = tuple(
+        chain.from_iterable(
+            await asyncio.gather(
+                *(cluster.services.add(Filter(attr="name", op="eq", value="with_actions")) for cluster in clusters)
+            )
+        )
+    )
+    components = tuple(chain.from_iterable(await asyncio.gather(*(service.components.all() for service in services))))
+
+    host_groups = await asyncio.gather(
+        *(
+            object_.action_host_groups.create(name=f"ahg for {object_.__class__.__name__}")
+            for object_ in chain(clusters, services, components)
+        )
+    )
+
+    for object_ in chain(clusters, services, components, hosts, hostproviders):
+        await run_non_blocking(object_, name__eq="success")
+
+    for group in host_groups:
+        await run_non_blocking(group, name__in=["fail"])
+
+
+@pytest.mark.usefixtures("prepare_environment")
+@pytest.mark.parametrize("adcm_client", [{"timeout": 60}], ids=["t60"], indirect=True)
+async def test_jobs_api(adcm_client: ADCMClient) -> None:
+    await _test_basic_api(adcm_client)
+    await _test_job_object(adcm_client)
+    await _test_collection_filtering(adcm_client)
+
+
+async def _test_basic_api(adcm_client: ADCMClient) -> None:
+    cluster = await adcm_client.clusters.get(name__eq="wow-4")
+    service = await cluster.services.get(name__contains="action")
+    component = await service.components.get(display_name__icontains="wESo")
+
+    action = await component.actions.get(display_name__ieq="Lots of me")
+    job = await action.run()
+    # the status depends on how quickly it is retrieved, so accept either one
+    assert await job.get_status() in ("created", "running")
+    assert job.start_time is None
+    assert job.finish_time is None
+    assert (await job.action).id == action.id
+
+    await job.wait(exit_condition=is_running, timeout=30, poll_interval=1)
+    assert job.start_time is None
+    await job.refresh()
+    assert isinstance(job.start_time, datetime)
+    assert job.finish_time is None
+
+    target = await job.object
+    assert isinstance(target, Component)
+    assert target.id == component.id
+    assert target.service.id == component.service.id
+
+    await job.wait(timeout=30, poll_interval=3)
+
+    assert await job.get_status() == "success"
+    assert job.finish_time is None
+    await job.refresh()
+    assert isinstance(job.finish_time, datetime)
+
+
+async def _test_job_object(adcm_client: ADCMClient) -> None:
+    cluster, *_ = await adcm_client.clusters.list(query={"limit": 1, "offset": 4})
+    service = await cluster.services.get()
+    component = await service.components.get(name__eq="c2")
+    hostprovider, *_ = await adcm_client.hostproviders.list(query={"limit": 1, "offset": 2})
+    host, *_ = await adcm_client.hosts.list(query={"limit": 1, "offset": 4})
+
+    host_group_1 = await service.action_host_groups.get()
+    host_group_2 = await component.action_host_groups.get()
+
+    all_targets = (cluster, service, component, hostprovider, host, host_group_1, host_group_2)
+
+    for target in all_targets:
+        jobs = await adcm_client.jobs.filter(object=target)
+        assert len(jobs) == 1, f"Number of jobs is incorrect for {target}: {len(jobs)}. Expected 1"
+        job = jobs[0]
+        await check_job_object(job=job, object_=target)  # type: ignore
+
+
+async def _test_collection_filtering(adcm_client: ADCMClient) -> None:
+    failed_jobs = 20
+    services_amount = 5
+
+    for job in await adcm_client.jobs.all():
+        await job.wait(timeout=60)
+
+    jobs = await adcm_client.jobs.list()
+    assert len(jobs) == 50
+
+    jobs = await adcm_client.jobs.all()
+    total_jobs = len(jobs)
+    assert total_jobs > 50
+
+    cases = (
+        # status
+        ("status__eq", "failed", failed_jobs),
+        ("status__ieq", "faiLed", failed_jobs),
+        ("status__ne", "success", failed_jobs),
+        ("status__ine", "succEss", failed_jobs),
+        ("status__in", ("failed", "success"), total_jobs),
+        ("status__iin", ("faIled", "sUcceSs"), total_jobs),
+        ("status__exclude", ("failed", "success"), 0),
+        ("status__iexclude", ("succesS",), failed_jobs),
+        # name
+        ("name__eq", "fail", failed_jobs),
+        ("name__ieq", "FaIl", failed_jobs),
+        ("name__ne", "fail", total_jobs - failed_jobs),
+        ("name__ine", "FaIl", total_jobs - failed_jobs),
+        ("name__in", ("success", "success_task"), total_jobs - failed_jobs),
+        ("name__iin", ("sUccEss", "success_Task"), total_jobs - failed_jobs),
+        ("name__exclude", ("success",), failed_jobs + 1),
+        ("name__iexclude", ("success",), failed_jobs + 1),
+        ("name__contains", "il", failed_jobs),
+        ("name__icontains", "I", failed_jobs),
+        # display_name
+        ("display_name__eq", "no Way", failed_jobs),
+        ("display_name__ieq", "No way", failed_jobs),
+        ("display_name__ne", "no Way", total_jobs - failed_jobs),
+        ("display_name__ine", "No way", total_jobs - failed_jobs),
+        ("display_name__in", ("I will survive", "Lots Of me"), total_jobs - failed_jobs),
+        ("display_name__iin", ("i will survive", "lots of me"), total_jobs - failed_jobs),
+        ("display_name__exclude", ("I will survive",), failed_jobs
+ 1), + ("display_name__iexclude", ("i will survive",), failed_jobs + 1), + ("display_name__contains", "W", failed_jobs), + ("display_name__icontains", "W", total_jobs - 1), + ) + + for inline_filter, value, expected_amount in cases: + filter_ = {inline_filter: value} + result = await adcm_client.jobs.filter(**filter_) # type: ignore + actual_amount = len(result) + assert ( + actual_amount == expected_amount + ), f"Incorrect amount for {filter_=}\nExpected: {expected_amount}\nActual: {actual_amount}" + unique_entries = set(map(attrgetter("id"), result)) + assert len(unique_entries) == expected_amount + + cluster = await adcm_client.clusters.get(name__eq="wow-4") + service = await cluster.services.get() + service_ahg = await service.action_host_groups.get() + + fail_action = await service.actions.get(name__eq="fail") + success_action = await service.actions.get(name__eq="success") + + jobs = [job async for job in adcm_client.jobs.iter(action__eq=fail_action)] + assert len(jobs) == 5 + objects = [] + for job in jobs: + objects.append(await job.object) + assert all(isinstance(o, ActionHostGroup) for o in objects) + assert any(o.id == service_ahg.id for o in objects) + + job = await adcm_client.jobs.get_or_none(action__in=(fail_action, success_action), status__eq="notexist") + assert job is None + + jobs = await adcm_client.jobs.filter(action__ne=success_action) + assert len(jobs) == total_jobs - services_amount + + jobs = await adcm_client.jobs.filter(action__exclude=(success_action,)) + assert len(jobs) == total_jobs - services_amount diff --git a/tests/integration/test_mapping.py b/tests/integration/test_mapping.py new file mode 100644 index 00000000..6fac943d --- /dev/null +++ b/tests/integration/test_mapping.py @@ -0,0 +1,159 @@ +from collections.abc import Iterable +from itertools import chain +from pathlib import Path +import asyncio + +import pytest +import pytest_asyncio + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.filters import Filter +from adcm_aio_client.core.mapping.refresh import apply_local_changes, apply_remote_changes +from adcm_aio_client.core.mapping.types import MappingPair +from adcm_aio_client.core.objects.cm import Bundle, Cluster, Host +from tests.integration.bundle import pack_bundle +from tests.integration.conftest import BUNDLES + +pytestmark = [pytest.mark.asyncio] + +type FiveHosts = tuple[Host, Host, Host, Host, Host] + + +def build_name_mapping(*iterables: Iterable[MappingPair]) -> set[tuple[str, str, str]]: + return {(c.service.name, c.name, h.name) for c, h in chain.from_iterable(iterables)} + + +@pytest_asyncio.fixture() +async def cluster_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "complex_cluster", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def hostprovider_bundle(adcm_client: ADCMClient, tmp_path: Path) -> Bundle: + bundle_path = pack_bundle(from_dir=BUNDLES / "simple_hostprovider", to=tmp_path) + return await adcm_client.bundles.create(source=bundle_path, accept_license=True) + + +@pytest_asyncio.fixture() +async def cluster(adcm_client: ADCMClient, cluster_bundle: Bundle) -> Cluster: + cluster = await adcm_client.clusters.create(bundle=cluster_bundle, name="Awesome Cluster") + await cluster.services.add(filter_=Filter(attr="name", op="contains", value="example")) + return cluster + + +@pytest_asyncio.fixture() +async def hosts(adcm_client: ADCMClient, 
hostprovider_bundle: Bundle) -> FiveHosts: + hp = await adcm_client.hostproviders.create(bundle=hostprovider_bundle, name="Awesome HostProvider") + coros = (adcm_client.hosts.create(hostprovider=hp, name=f"host-{i+1}") for i in range(5)) + await asyncio.gather(*coros) + hosts = await adcm_client.hosts.all() + return tuple(hosts) # type: ignore[reportReturnType] + + +async def test_cluster_mapping(adcm_client: ADCMClient, cluster: Cluster, hosts: FiveHosts) -> None: + mapping = await cluster.mapping + + assert len(mapping.all()) == 0 + assert len(await mapping.hosts.all()) == 0 + assert len(await mapping.components.all()) == 6 + + await cluster.hosts.add(host=hosts) + host_1, host_2, host_3, host_4, host_5 = await mapping.hosts.all() + + service_1 = await cluster.services.get(display_name__eq="First Example") + service_2 = await cluster.services.get(name__eq="example_2") + + component_1_s1 = await service_1.components.get(name__eq="first") + component_2_s2 = await service_2.components.get(display_name__in=["Second Component"]) + + # local mapping editing + + await mapping.add(component=component_1_s1, host=host_1) + assert len(tuple(mapping.iter())) == 1 + + await mapping.add(component=(component_1_s1, component_2_s2), host=(host_1, host_3, host_4)) + assert len(mapping.all()) == 6 + + await mapping.remove(component=component_2_s2, host=(host_2, host_3)) + assert len(mapping.all()) == 5 + + await mapping.remove(component=(component_1_s1, component_2_s2), host=host_1) + assert len(mapping.all()) == 3 + + await mapping.add( + component=await mapping.components.filter(display_name__icontains="different"), + host=Filter(attr="name", op="in", value=(host_2.name, host_5.name)), + ) + assert len(mapping.all()) == 7 + + await mapping.remove( + component=await mapping.components.filter(display_name__icontains="different"), + host=Filter(attr="name", op="in", value=(host_2.name, host_5.name)), + ) + assert len(mapping.all()) == 3 + + mapping.empty() + assert mapping.all() == [] + + # saving + + all_components = await mapping.components.all() + + await mapping.add(component=all_components, host=host_5) + await mapping.add(component=component_1_s1, host=(host_2, host_3)) + await mapping.save() + + expected_mapping = build_name_mapping( + ((c, host_5) for c in all_components), ((component_1_s1, h) for h in (host_2, host_3)) + ) + actual_mapping = build_name_mapping(mapping.iter()) + assert actual_mapping == expected_mapping + + # refreshing + + cluster_alt = await adcm_client.clusters.get(name__eq=cluster.name) + mapping_alt = await cluster_alt.mapping + + assert build_name_mapping(mapping.iter()) == build_name_mapping(mapping_alt.iter()) + + component_3_s2 = await service_2.components.get(name__eq="third_one") + components_except_3_s2 = tuple(c for c in all_components if c.id != component_3_s2.id) + + await mapping_alt.remove(component_1_s1, host_3) + await mapping_alt.add(component_3_s2, (host_2, host_4)) + + await mapping.add((component_1_s1, component_3_s2), host_1) + await mapping.remove(component_3_s2, host_5) + + await mapping_alt.save() + + await mapping.refresh(strategy=apply_remote_changes) + + expected_mapping = build_name_mapping( + ((c, host_5) for c in components_except_3_s2), + ((component_1_s1, h) for h in (host_1, host_2)), + ((component_3_s2, h) for h in (host_1, host_2, host_4)), + ) + actual_mapping = build_name_mapping(mapping.iter()) + assert actual_mapping == expected_mapping + + # drop cached mapping and apply the same local changes + await cluster.refresh() + mapping = await 
cluster.mapping + + await mapping.add((component_1_s1, component_3_s2), host_1) + await mapping.remove(component_3_s2, host_5) + + await mapping.refresh(strategy=apply_local_changes) + + expected_mapping = ( + # base is remote, but with local changes + build_name_mapping(mapping_alt.iter()) + # remove what's removed locally + - build_name_mapping(((component_3_s2, host_5),)) + # add what's added locally + | build_name_mapping(((component_1_s1, host_1), (component_3_s2, host_1))) + ) + actual_mapping = build_name_mapping(mapping.iter()) + assert actual_mapping == expected_mapping diff --git a/tests/integration/test_misc.py b/tests/integration/test_misc.py new file mode 100644 index 00000000..13772aec --- /dev/null +++ b/tests/integration/test_misc.py @@ -0,0 +1,34 @@ +from typing import Self + +from asyncstdlib.functools import cached_property as async_cached_property +import pytest + +pytestmark = [pytest.mark.asyncio] + + +class Dummy: + def __init__(self: Self) -> None: + self.counter = 0 + + @async_cached_property + async def func(self: Self) -> int: + self.counter += 1 + + return self.counter + + +async def test_async_cached_property() -> None: + obj = Dummy() + assert "func" not in obj.__dict__, "`func` key should not be cached yet" + + res = await obj.func + assert res == 1 + assert "func" in obj.__dict__, "`func` key should be cached" + + res = await obj.func + assert res == 1, "Cached value must be used" + + delattr(obj, "func") + res = await obj.func + assert res == 2, "Expected to execute func() again, increasing the counter" + assert "func" in obj.__dict__ diff --git a/tests/integration/test_service.py b/tests/integration/test_service.py new file mode 100644 index 00000000..7933f9de --- /dev/null +++ b/tests/integration/test_service.py @@ -0,0 +1,210 @@ +from copy import copy +from pathlib import Path +from typing import Collection +import random +import string + +from httpx import AsyncClient +import pytest +import pytest_asyncio + +from adcm_aio_client.core.client import ADCMClient +from adcm_aio_client.core.config import ConfigHistoryNode, ObjectConfig +from adcm_aio_client.core.errors import ConflictError, MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.filters import Filter +from adcm_aio_client.core.objects._imports import Imports +from adcm_aio_client.core.objects.cm import Cluster, License, Service +from tests.integration.bundle import pack_bundle +from tests.integration.yaml import create_yaml + +pytestmark = [pytest.mark.asyncio] + + +def prepare_bundle_data() -> list[dict]: + config = [ + { + "name": "string_field", + "type": "string", + "default": "string_field value", + } + ] + + service = { + "type": "service", + "name": "Generated service", + "version": 1.0, + "config": config, + } + + service_manual_add = copy(service) + service_manual_add.update({"name": "Manual add", "license": "./service_license.txt"}) + + fifty_one_services = [] + for i in range(51): + service = copy(service) + service.update({"name": f"Generated service {i + 1}"}) + fifty_one_services.append(service) + + return [ + { + "type": "cluster", + "name": "Generated cluster", + "version": 1, + }, + *fifty_one_services, + service_manual_add, + ] + + +def assert_services_collection(services: Collection[Service], expected_amount: int) -> None: + assert all(isinstance(cluster, Service) for cluster in services) + assert len({service.id for service in services}) == expected_amount + assert len({id(service) for service in services}) == expected_amount + + 
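+# NOTE: prepare_bundle_data() above takes copy(service) on every loop iteration
+# so that each generated definition is an independent dict; calling update() on
+# one shared dict instead would leave all 51 list entries pointing at the same
+# object, each named "Generated service 51".
+
+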
+@pytest_asyncio.fixture()
+async def cluster_52(adcm_client: ADCMClient, tmp_path: Path) -> Cluster:
+    """
+    Cluster with 52 services in the bundle, one of which is not added
+    """
+
+    bundle_folder = tmp_path / "".join(random.sample(string.ascii_letters, k=6)).lower()
+    config_yaml_path = bundle_folder / "config.yaml"
+    create_yaml(data=prepare_bundle_data(), path=config_yaml_path)
+
+    (bundle_folder / "service_license.txt").write_text("By using this test bundle, you agree to write tests well\n")
+
+    bundle_path = pack_bundle(from_dir=bundle_folder, to=tmp_path)
+    bundle = await adcm_client.bundles.create(source=bundle_path)
+
+    cluster = await adcm_client.clusters.create(bundle=bundle, name="Test cluster 52")
+    await cluster.services.add(filter_=Filter(attr="name", op="icontains", value="service"))
+
+    return cluster
+
+
+async def test_service_api(cluster_52: Cluster, httpx_client: AsyncClient) -> None:
+    cluster = cluster_52
+    num_services = 51
+
+    await _test_service_create_delete_api(name="Manual add", cluster=cluster, httpx_client=httpx_client)
+    await _test_services_node(cluster=cluster, num_services=num_services)
+    await _test_service_object_api(
+        service=await cluster.services.get(name__eq="Generated service 1"), parent_cluster=cluster
+    )
+
+
+async def _test_service_create_delete_api(name: str, cluster: Cluster, httpx_client: AsyncClient) -> None:
+    target_service_filter = Filter(attr="name", op="eq", value=name)
+
+    with pytest.raises(ConflictError, match="LICENSE_ERROR"):
+        await cluster.services.add(filter_=target_service_filter)
+
+    services = await cluster.services.add(filter_=target_service_filter, accept_license=True)
+    assert len(services) == 1
+    service = services[0]
+
+    service_url_part = f"clusters/{cluster.id}/services/{service.id}/"
+    response = await httpx_client.get(service_url_part)
+
+    assert response.status_code == 200
+    service_data = response.json()
+
+    assert service_data["id"] == service.id
+    assert service_data["name"] == name
+
+    await service.delete()
+    response = await httpx_client.get(service_url_part)
+    assert response.status_code == 404
+
+
+async def _test_services_node(cluster: Cluster, num_services: int) -> None:
+    no_objects_msg = "^No objects found with the given filter.$"
+    multiple_objects_msg = "^More than one object found.$"
+
+    # get
+    assert isinstance(await cluster.services.get(name__eq="Generated service 30"), Service)
+
+    with pytest.raises(ObjectDoesNotExistError, match=no_objects_msg):
+        await cluster.services.get(name__eq="Non-existent service")
+
+    with pytest.raises(MultipleObjectsReturnedError, match=multiple_objects_msg):
+        await cluster.services.get(name__in=["Generated service 1", "Generated service 2"])
+
+    # get_or_none
+    assert isinstance(await cluster.services.get_or_none(name__eq="Generated service 50"), Service)
+
+    assert await cluster.services.get_or_none(name__eq="Non-existent service") is None
+
+    with pytest.raises(MultipleObjectsReturnedError, match=multiple_objects_msg):
+        await cluster.services.get_or_none(name__in=["Generated service 1", "Generated service 2"])
+
+    # all
+    all_services = await cluster.services.all()
+    assert_services_collection(services=all_services, expected_amount=num_services)
+
+    # list
+    page_size = 50
+    assert page_size < num_services, "check page_size or number of services"
+
+    first_page_services = await cluster.services.list()
+    assert_services_collection(services=first_page_services, expected_amount=page_size)
+
+    # iter
+    iter_services = set()
+    async for service in cluster.services.iter():
+        
iter_services.add(service) + assert_services_collection(services=iter_services, expected_amount=num_services) + + # filter + name_filters_data = { + ("name__eq", "Generated service 8"): 1, + ("name__ieq", "GeNeRaTeD SeRvIcE 18"): 1, + ("name__ne", "Generated service 51"): num_services - 1, + ("name__ine", "GENERATED service 51"): num_services - 1, + ("name__in", ("Generated service 51", "Generated service 50", "GENERATED SERVICE 49", "Not a service")): 2, + ("name__iin", ("Generated service 51", "Generated service 50", "GENERATED SERVICE 49", "Not a service")): 3, + ("name__exclude", ("Generated service 1", "Generated service 2", "Not a service")): num_services - 2, + ("name__iexclude", ("GENERATED SERVICE 51", "Not a service")): num_services - 1, + ("name__contains", "38"): 1, + ("name__contains", "Generated"): num_services, + ("name__icontains", "TeD sErV"): num_services, + } + display_name_filters_data = { # display_names are the same as names + (f"display_{filter_[0]}", filter_[1]): expected for filter_, expected in name_filters_data.items() + } + + filters_data = { + **name_filters_data, + **display_name_filters_data, + ("status__eq", "up"): num_services, + ("status__eq", "down"): 0, + ("status__in", ("down", "some status")): 0, + ("status__in", ("up", "some status")): num_services, + ("status__ne", "down"): num_services, + ("status__ne", "up"): 0, + ("status__exclude", ("excluded_status", "down")): num_services, + ("status__exclude", ("excluded_status", "up")): 0, + ("status__exclude", ("up", "down")): 0, + } + for filter_, expected in filters_data.items(): + filter_value = {filter_[0]: filter_[1]} + services = await cluster.services.filter(**filter_value) + assert len(services) == expected, f"Filter: {filter_value}" + + +async def _test_service_object_api(service: Service, parent_cluster: Cluster) -> None: + assert isinstance(service.id, int) + assert isinstance(service.name, str) + assert isinstance(service.display_name, str) + assert isinstance(service.cluster, Cluster) + assert service.cluster.id == parent_cluster.id + assert isinstance(await service.license, License) + assert isinstance(await service.components.all(), list) + assert isinstance(await service.get_status(), str) + assert isinstance(await service.actions.all(), list) + assert isinstance(await service.config, ObjectConfig) + assert isinstance(service.config_history, ConfigHistoryNode) + assert isinstance(await service.imports, Imports) + assert isinstance(await service.config_host_groups.all(), list) + assert isinstance(await service.action_host_groups.all(), list) diff --git a/tests/integration/yaml.py b/tests/integration/yaml.py new file mode 100644 index 00000000..62725b2b --- /dev/null +++ b/tests/integration/yaml.py @@ -0,0 +1,16 @@ +from pathlib import Path + +import yaml + + +def create_yaml(data: list | dict, path: Path) -> None: + """ + :param data: desired .yaml file content + :param path: target .yaml path + """ + if path.suffix not in {".yaml", ".yml"}: + raise ValueError(f"Invalid .yaml/.yml path: {path}") + + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("w") as yaml_file: + yaml.dump(data, yaml_file, default_flow_style=False) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/bundles/config_example_v1/config.yaml b/tests/unit/bundles/config_example_v1/config.yaml new file mode 100644 index 00000000..04edb7da --- /dev/null +++ b/tests/unit/bundles/config_example_v1/config.yaml @@ -0,0 +1,69 @@ +- type: 
cluster
+  name: Cluster With Config Example
+  version: 1
+  description: |
+    This bundle is designed to provide a sample config,
+    not necessarily including all config types or combinations.
+    Don't change the configs of existing objects in it;
+    add a new service / component if you need to.
+- type: service
+  name: with_json_fields_and_groups
+  version: 1.0
+
+  config:
+    - name: root_int
+      display_name: Integer At Root
+      type: integer
+      default: 100
+    - name: root_list
+      display_name: List At Root
+      type: list
+      default: ["first", "second", "third"]
+    - name: root_dict
+      display_name: Map At Root
+      type: map
+      default: {"k1": "v1", "k2": "v2"}
+      required: false
+    - name: duplicate
+      display_name: Duplicate
+      type: string
+      default: "hehe"
+    - name: root_json
+      display_name: JSON At Root
+      type: json
+      default: {}
+    - name: main
+      display_name: Main Section
+      type: group
+      subs:
+        - name: inner_str
+          display_name: String In Group
+          type: string
+          default: "evil"
+        - name: inner_dict
+          display_name: Map In Group
+          type: map
+          default: {"a": "b"}
+        - name: inner_json
+          display_name: JSON In Group
+          type: json
+          default: {"complex": [], "jsonfield": 23, "server": "bestever"}
+        - name: duplicate
+          display_name: Duplicate
+          type: integer
+          default: 44
+    - name: optional_group
+      display_name: Optional Section
+      type: group
+      activatable: true
+      active: false
+      subs:
+        - name: param
+          display_name: Param In Activatable Group
+          type: float
+          default: 44.44
+          required: false
+    - name: root_str
+      display_name: String At Root
+      type: string
+      required: false
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
new file mode 100644
index 00000000..3df732e6
--- /dev/null
+++ b/tests/unit/conftest.py
@@ -0,0 +1,13 @@
+from pathlib import Path
+
+import pytest
+
+from tests.unit.mocks.requesters import QueueRequester
+
+FILES = Path(__file__).parent / "files"
+RESPONSES = FILES / "responses"
+
+
+@pytest.fixture()
+def queue_requester() -> QueueRequester:
+    return QueueRequester()
diff --git a/tests/unit/files/responses/.description b/tests/unit/files/responses/.description
new file mode 100644
index 00000000..837b946b
--- /dev/null
+++ b/tests/unit/files/responses/.description
@@ -0,0 +1,4 @@
+This directory has samples of responses from ADCM,
+used as mock responses in unit tests.
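+As an illustration: with RESPONSES from tests/unit/conftest.py, one such sample
+can be loaded via json.loads((RESPONSES / "test_config_example_config.json").read_text()).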
diff --git a/tests/unit/files/responses/test_config_example_config.json b/tests/unit/files/responses/test_config_example_config.json new file mode 100644 index 00000000..9a50a74a --- /dev/null +++ b/tests/unit/files/responses/test_config_example_config.json @@ -0,0 +1,37 @@ +{ + "id": 3, + "isCurrent": true, + "creationTime": "2024-11-21T06:38:58.517310Z", + "config": { + "main": { + "duplicate": 44, + "inner_str": "evil", + "inner_dict": { + "a": "b" + }, + "inner_json": "{\"server\": \"bestever\", \"complex\": [], \"jsonfield\": 23}" + }, + "root_int": 100, + "root_str": null, + "duplicate": "hehe", + "root_dict": { + "k1": "v1", + "k2": "v2" + }, + "root_json": "{}", + "root_list": [ + "first", + "second", + "third" + ], + "optional_group": { + "param": 44.44 + } + }, + "adcmMeta": { + "/optional_group": { + "isActive": false + } + }, + "description": "init" +} diff --git a/tests/unit/files/responses/test_config_example_config_schema.json b/tests/unit/files/responses/test_config_example_config_schema.json new file mode 100644 index 00000000..21981799 --- /dev/null +++ b/tests/unit/files/responses/test_config_example_config_schema.json @@ -0,0 +1,324 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Configuration", + "description": "", + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "nullValue": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + }, + "type": "object", + "properties": { + "root_int": { + "title": "Integer At Root", + "type": "integer", + "description": "", + "default": 100, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + } + }, + "root_list": { + "title": "List At Root", + "type": "array", + "description": "", + "default": [ + "first", + "second", + "third" + ], + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + }, + "items": { + "type": "string", + "title": "", + "description": "", + "default": null, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "nullValue": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + } + }, + "minItems": 1 + }, + "root_dict": { + "oneOf": [ + { + "title": "Map At Root", + "type": "object", + "description": "", + "default": { + "k1": "v1", + "k2": "v2" + }, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + }, + "additionalProperties": true, + "properties": {} + }, + { + "type": "null" + } + ] + }, + "duplicate": { + "title": "Duplicate", + "type": "string", + "description": "", + "default": "hehe", + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": { + "isMultiline": false + }, + "enumExtra": null + }, + "minLength": 1 + }, + "root_json": { + "title": "JSON At Root", + "type": "string", + "description": "", + "default": "{}", + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + 
"synchronization": null, + "isSecret": false, + "stringExtra": { + "isMultiline": true + }, + "enumExtra": null + }, + "format": "json", + "minLength": 1 + }, + "main": { + "title": "Main Section", + "type": "object", + "description": "", + "default": {}, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + }, + "additionalProperties": false, + "properties": { + "inner_str": { + "title": "String In Group", + "type": "string", + "description": "", + "default": "evil", + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": { + "isMultiline": false + }, + "enumExtra": null + }, + "minLength": 1 + }, + "inner_dict": { + "title": "Map In Group", + "type": "object", + "description": "", + "default": { + "a": "b" + }, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + }, + "additionalProperties": true, + "properties": {}, + "minProperties": 1 + }, + "inner_json": { + "title": "JSON In Group", + "type": "string", + "description": "", + "default": "{\"complex\": [], \"jsonfield\": 23, \"server\": \"bestever\"}", + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": { + "isMultiline": true + }, + "enumExtra": null + }, + "format": "json", + "minLength": 1 + }, + "duplicate": { + "title": "Duplicate", + "type": "integer", + "description": "", + "default": 44, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + } + } + }, + "required": [ + "inner_str", + "inner_dict", + "inner_json", + "duplicate" + ] + }, + "optional_group": { + "title": "Optional Section", + "type": "object", + "description": "", + "default": {}, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": { + "isAllowChange": true + }, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + }, + "additionalProperties": false, + "properties": { + "param": { + "oneOf": [ + { + "title": "Param In Activatable Group", + "type": "number", + "description": "", + "default": 44.44, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": null, + "enumExtra": null + } + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "param" + ] + }, + "root_str": { + "oneOf": [ + { + "title": "String At Root", + "type": "string", + "description": "", + "default": null, + "readOnly": false, + "adcmMeta": { + "isAdvanced": false, + "isInvisible": false, + "activation": null, + "synchronization": null, + "isSecret": false, + "stringExtra": { + "isMultiline": false + }, + "enumExtra": null + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "required": [ + "root_int", + "root_list", + "root_dict", + "duplicate", + "root_json", + "main", + "optional_group", + "root_str" + ] +} diff --git a/tests/unit/mocks/__init__.py b/tests/unit/mocks/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/tests/unit/mocks/requesters.py b/tests/unit/mocks/requesters.py new file mode 100644 index 00000000..adba9357 --- /dev/null +++ b/tests/unit/mocks/requesters.py @@ -0,0 +1,70 @@ +from collections import deque +from dataclasses import dataclass, field +from typing import Self + +from adcm_aio_client.core.errors import ResponseDataConversionError +from adcm_aio_client.core.types import Credentials, PathPart, QueryParameters, Requester, RequesterResponse + +type FakeResponseData = dict | list + + +@dataclass(slots=True) +class QueueResponse(RequesterResponse): + data: FakeResponseData + + def as_list(self: Self) -> list: + if not isinstance(data := self.data, list): + message = f"Expected a list, got {type(data)}" + raise ResponseDataConversionError(message) + + return data + + def as_dict(self: Self) -> dict: + if not isinstance(data := self.data, dict): + message = f"Expected a dict, got {type(data)}" + raise ResponseDataConversionError(message) + + return data + + +@dataclass() +class QueueRequester(Requester): + queue: deque[FakeResponseData] = field(default_factory=deque) + + async def login(self: Self, credentials: Credentials) -> Self: + _ = credentials + return self + + async def get(self: Self, *path: PathPart, query: QueryParameters | None = None) -> RequesterResponse: + _ = path, query + return self._return_next_response() + + async def post_files(self: Self, *path: PathPart, files: dict | list) -> RequesterResponse: + _ = path, files + return self._return_next_response() + + async def post(self: Self, *path: PathPart, data: dict | list) -> RequesterResponse: + _ = path, data + return self._return_next_response() + + async def patch(self: Self, *path: PathPart, data: dict | list) -> RequesterResponse: + _ = path, data + return self._return_next_response() + + async def delete(self: Self, *path: PathPart) -> RequesterResponse: + _ = path + return self._return_next_response() + + # specifics + + def queue_responses(self: Self, *responses: FakeResponseData) -> Self: + self.queue.extend(responses) + return self + + def flush(self: Self) -> Self: + self.queue.clear() + return self + + def _return_next_response(self: Self) -> RequesterResponse: + next_response = self.queue.popleft() + return QueueResponse(data=next_response) diff --git a/tests/unit/test_accessors.py b/tests/unit/test_accessors.py new file mode 100644 index 00000000..489ed38c --- /dev/null +++ b/tests/unit/test_accessors.py @@ -0,0 +1,305 @@ +from typing import Any, AsyncGenerator, Callable, Self + +import pytest + +from adcm_aio_client.core.errors import InvalidFilterError, MultipleObjectsReturnedError, ObjectDoesNotExistError +from adcm_aio_client.core.filters import FilterBy, FilterByName, Filtering +from adcm_aio_client.core.objects._accessors import ( + Accessor, + NonPaginatedChildAccessor, + PaginatedAccessor, + PaginatedChildAccessor, +) +from adcm_aio_client.core.objects._base import InteractiveChildObject, InteractiveObject +from adcm_aio_client.core.types import Endpoint +from tests.unit.mocks.requesters import QueueRequester +from tests.unit.utils import n_entries_as_list + +pytestmark = [pytest.mark.asyncio] + + +no_validation = Filtering() + + +class _OwnPath: + def get_own_path(self: Self) -> Endpoint: + return () + + +class Dummy(_OwnPath, InteractiveObject): ... + + +class DummyChild(_OwnPath, InteractiveChildObject): ... 
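Note that `QueueRequester` (defined above) serves queued payloads strictly FIFO and ignores the HTTP method and path entirely; the accessor tests that follow lean on that property. A self-contained sketch of the contract — the paths here are arbitrary placeholders:

```python
import asyncio

from tests.unit.mocks.requesters import QueueRequester


async def main() -> None:
    requester = QueueRequester().queue_responses({"id": 1}, [1, 2, 3])

    # the method and path don't matter, only the queue order does
    first = await requester.get("clusters")
    second = await requester.post("hosts", data={})

    assert first.as_dict() == {"id": 1}
    assert second.as_list() == [1, 2, 3]


asyncio.run(main())
```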
+ + +class DummyPaginatedAccessor(PaginatedAccessor[Dummy]): + class_type = Dummy + filtering = no_validation + + +class DummyChildPaginatedAccessor(PaginatedChildAccessor[Dummy, DummyChild]): + class_type = DummyChild + filtering = no_validation + + +class DummyChildNonPaginatedAccessor(NonPaginatedChildAccessor[Dummy, DummyChild]): + class_type = DummyChild + filtering = no_validation + + +class DummyAccessorWithFilter(PaginatedAccessor[Dummy]): + class_type = Dummy + filtering = Filtering(FilterByName, FilterBy("custom", {"eq"}, Dummy)) + + +def create_paginated_response(amount: int) -> dict: + return {"results": [{} for _ in range(amount)]} + + +def extract_paginated_response_entries(data: dict) -> list: + return data["results"] + + +def create_non_paginated_response(amount: int) -> list: + return [{} for _ in range(amount)] + + +async def test_paginated(queue_requester: QueueRequester) -> None: + requester = queue_requester + accessor = DummyPaginatedAccessor(requester=requester, path=()) + + await _test_paginated_accessor_common_methods( + accessor=accessor, + requester=requester, + create_response=create_paginated_response, + extract_entries=extract_paginated_response_entries, + check_entry=lambda entry: isinstance(entry, Dummy), + ) + + +async def test_paginated_child(queue_requester: QueueRequester) -> None: + requester = queue_requester + parent = Dummy(requester=requester, data={}) + accessor = DummyChildPaginatedAccessor(requester=requester, path=(), parent=parent) + + await _test_paginated_accessor_common_methods( + accessor=accessor, + requester=requester, + create_response=create_paginated_response, + extract_entries=extract_paginated_response_entries, + check_entry=lambda entry: isinstance(entry, DummyChild) and entry._parent is parent, + ) + + +async def test_non_paginated_child(queue_requester: QueueRequester) -> None: + requester = queue_requester + parent = Dummy(requester=requester, data={}) + accessor = DummyChildNonPaginatedAccessor(requester=requester, path=(), parent=parent) + create_response = create_non_paginated_response + check_entry = lambda entry: isinstance(entry, DummyChild) and entry._parent is parent # noqa: E731 + + response_sequence = (create_response(10), create_response(4), create_response(0)) + amount_of_entries = len(response_sequence[0]) + + # get + + requester.flush().queue_responses(create_response(1)) + result = await accessor.get() + + assert check_entry(result) + + requester.flush().queue_responses(create_response(0)) + + with pytest.raises(ObjectDoesNotExistError): + await accessor.get() + requester.flush().queue_responses(create_response(2)) + + with pytest.raises(MultipleObjectsReturnedError): + await accessor.get() + + # get or none + + requester.flush().queue_responses(create_response(1)) + result = await accessor.get_or_none() + + assert check_entry(result) + + requester.flush().queue_responses(create_response(0)) + result = await accessor.get_or_none() + + assert result is None + + requester.flush().queue_responses(create_response(2)) + + with pytest.raises(MultipleObjectsReturnedError): + await accessor.get_or_none() + + # list + + requester.flush().queue_responses(*response_sequence) + result = await accessor.list() + + assert isinstance(result, list) + assert len(result) == 10 + assert all(map(check_entry, result)) + + assert len(requester.queue) == len(response_sequence) - 1 + + # all + + requester.flush().queue_responses(*response_sequence) + result = await accessor.all() + + assert isinstance(result, list) + assert len(result) == 
amount_of_entries + assert all(map(check_entry, result)) + + assert len(requester.queue) == len(response_sequence) - 1 + + # filter (with no args is the same as all) + + requester.flush().queue_responses(*response_sequence) + result = await accessor.filter() + + assert isinstance(result, list) + assert len(result) == amount_of_entries + assert all(map(check_entry, result)) + + assert len(requester.queue) == len(response_sequence) - 1 + + # iter + + requester.flush().queue_responses(*response_sequence) + result = accessor.iter() + + # see no requests made at first + assert len(requester.queue) == len(response_sequence) + assert isinstance(result, AsyncGenerator) + + all_entries = [entry async for entry in result] + assert len(all_entries) == amount_of_entries + assert all(map(check_entry, all_entries)) + + # see 1 "pages" read, because it's not paginated + assert len(requester.queue) == len(response_sequence) - 1 + + +async def _test_paginated_accessor_common_methods[T: dict | list]( + accessor: Accessor, + requester: QueueRequester, + create_response: Callable[[int], T], + extract_entries: Callable[[T], list], + check_entry: Callable[[Any], bool], +) -> None: + response_sequence = (create_response(10), create_response(10), create_response(4), create_response(0)) + amount_of_all_entries = sum(map(len, map(extract_entries, response_sequence))) + + # get + + requester.flush().queue_responses(create_response(1)) + result = await accessor.get() + + assert check_entry(result) + + requester.flush().queue_responses(create_response(0)) + + with pytest.raises(ObjectDoesNotExistError): + await accessor.get() + + requester.flush().queue_responses(create_response(2)) + + with pytest.raises(MultipleObjectsReturnedError): + await accessor.get() + + # get or none + + requester.flush().queue_responses(create_response(1)) + result = await accessor.get_or_none() + + assert check_entry(result) + + requester.flush().queue_responses(create_response(0)) + result = await accessor.get_or_none() + + assert result is None + + requester.flush().queue_responses(create_response(2)) + + with pytest.raises(MultipleObjectsReturnedError): + await accessor.get_or_none() + + # list + + requester.flush().queue_responses(*response_sequence) + result = await accessor.list() + + assert isinstance(result, list) + assert len(result) == 10 + assert all(map(check_entry, result)) + assert len(requester.queue) == len(response_sequence) - 1 + + # all + + requester.flush().queue_responses(*response_sequence) + result = await accessor.all() + + assert isinstance(result, list) + assert len(result) == amount_of_all_entries + assert all(map(check_entry, result)) + + # filter (with no args is the same as all) + + requester.flush().queue_responses(*response_sequence) + result = await accessor.filter() + + assert isinstance(result, list) + assert len(result) == amount_of_all_entries + assert all(map(check_entry, result)) + + # iter + + requester.flush().queue_responses(*response_sequence) + result = accessor.iter() + + # see no requests made at first + assert len(requester.queue) == len(response_sequence) + assert isinstance(result, AsyncGenerator) + + n = 11 + first_entries = await n_entries_as_list(result, n=n) + assert len(first_entries) == n + + # see 2 "pages" read + assert len(requester.queue) == len(response_sequence) - 2 + + rest_entries = [i async for i in result] + assert len(rest_entries) == amount_of_all_entries - n + assert all(map(check_entry, (*first_entries, *rest_entries))) + + # now all results are read + assert 
len(requester.queue) == 0 + + +async def test_filter_validation(queue_requester: QueueRequester) -> None: + accessor = DummyAccessorWithFilter(requester=queue_requester, path=()) + + with pytest.raises(InvalidFilterError, match="by notexist is not allowed"): + await accessor.get(notexist__eq="sd") + + with pytest.raises(InvalidFilterError, match="Operation in is not allowed"): + await accessor.get(custom__in=["sd"]) + + with pytest.raises(InvalidFilterError, match="At least one entry is not"): + await accessor.get(name__iin=("sdlfkj", 1)) + + with pytest.raises(InvalidFilterError, match=f"1 is not {str}"): + await accessor.get(name__eq=1) + + with pytest.raises(InvalidFilterError, match="Multiple values expected for exclude"): + await accessor.get(name__exclude="sd") + + with pytest.raises(InvalidFilterError, match="Collection for filter shouldn't be empty"): + await accessor.get(name__exclude=[]) + + with pytest.raises(InvalidFilterError, match="Only one value is expected for icontains"): + await accessor.get(name__icontains={"sldkfj"}) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py new file mode 100644 index 00000000..d2ffc9ec --- /dev/null +++ b/tests/unit/test_config.py @@ -0,0 +1,166 @@ +from copy import deepcopy +from typing import Self +import json + +import pytest + +from adcm_aio_client.core.config._objects import ( + ActivatableParameterGroup, + ConfigOwner, + ObjectConfig, + Parameter, + ParameterGroup, +) +from adcm_aio_client.core.config.types import ConfigData, ConfigSchema +from adcm_aio_client.core.objects._base import InteractiveObject +from adcm_aio_client.core.types import Endpoint, Requester +from tests.unit.conftest import RESPONSES + + +class DummyParent(InteractiveObject): + def get_own_path(self: Self) -> Endpoint: + return ("dummy",) + + +@pytest.fixture() +def example_config() -> tuple[dict, dict]: + config = json.loads((RESPONSES / "test_config_example_config.json").read_text()) + + schema = json.loads((RESPONSES / "test_config_example_config_schema.json").read_text()) + + return config, schema + + +@pytest.fixture() +def dummy_parent(queue_requester: Requester) -> ConfigOwner: + return DummyParent(data={"id": 4}, requester=queue_requester) + + +@pytest.fixture() +def object_config(example_config: tuple[dict, dict], dummy_parent: ConfigOwner) -> ObjectConfig: + config_data, schema_data = example_config + + data = ConfigData.from_v2_response(data_in_v2_format=deepcopy(config_data)) + schema = ConfigSchema(spec_as_jsonschema=schema_data) + + return ObjectConfig(config=data, schema=schema, parent=dummy_parent) + + +def test_edit_config(example_config: tuple[dict, dict], object_config: ObjectConfig) -> None: + data, _ = example_config + + initial_parsed_data = deepcopy(data) + initial_parsed_data["config"]["root_json"] = json.loads(initial_parsed_data["config"]["root_json"]) + initial_parsed_data["config"]["main"]["inner_json"] = json.loads( + initial_parsed_data["config"]["main"]["inner_json"] + ) + + new_inner_json = { + "complex": [], + "jsonfield": 23, + "link": "do i look like a link to you?", + "arguments": ["-q", "something"], + } + new_root_json = ["now", "I am", "cool"] + + new_config = { + "root_int": 430, + "root_list": ["first", "second", "third", "best thing there is"], + "root_dict": None, + "duplicate": "hehe", + "root_json": new_root_json, + "main": { + "inner_str": "not the worst at least", + "inner_dict": {"a": "b", "additional": "keys", "are": "welcome"}, + "inner_json": new_inner_json, + "duplicate": 44, + }, + 
"optional_group": {"param": 44.44}, + "root_str": "newstring", + } + + # todo: + # - check no POST requests are performed + + config = object_config + + assert config.data.values == initial_parsed_data["config"] + assert config.data.attributes == initial_parsed_data["adcmMeta"] + + # Edit "root" values + + config["root_int", Parameter].set(new_config["root_int"]) + + # inner type won't be checked (list), + # but here we pretend "to be 100% sure" it's `list`, not `None` + config["root_list", Parameter].set([*config["root_list", Parameter[list]].value, new_config["root_list"][-1]]) + + root_dict = config["root_dict"] + assert isinstance(root_dict, Parameter) + assert isinstance(root_dict.value, dict) + root_dict.set(None) + assert root_dict.value is None + assert config["root_dict", Parameter].value is None + + # Edit group ("nested") values + + assert isinstance(config["main"], ParameterGroup) + # if we don't want type checker to bother us, we can yolo like that + config["main"]["inner_str"].set(new_config["main"]["inner_str"]) # type: ignore + + main_group = config["main"] + assert isinstance(main_group, ParameterGroup) + main_group["inner_dict", Parameter].set( + {**main_group["inner_dict", Parameter[dict]].value, "additional": "keys", "are": "welcome"} + ) + + activatable_group = config["optional_group"] + assert isinstance(activatable_group, ActivatableParameterGroup) + activatable_group.activate() + + # Edit JSON field + + # change value separately and set + json_field = main_group["inner_json"] + assert isinstance(json_field, Parameter) + assert isinstance(json_field.value, dict) + new_value = deepcopy(json_field.value) + new_value.pop("server") + new_value |= {"link": "do i look like a link to you?", "arguments": ["-q", "something"]} + json_field.set(new_value) + + # swap value type with direct set + assert isinstance(config["root_json"].value, dict) # type: ignore + config["root_json"].set(["now", "I am", "cool"]) # type: ignore + + # Type change specifics + + param = config["root_str"] + assert isinstance(param, Parameter) + assert param.value is None + + param.set("newstring") + assert isinstance(config["root_str"].value, str) # type: ignore + + # Check all values are changed + + config_for_save = config.data + assert config_for_save.values == new_config + assert config_for_save.attributes == {"/optional_group": {"isActive": True}} + + +def test_display_name_search(object_config: ObjectConfig) -> None: + # only display name search + assert object_config["Map At Root", Parameter].value == {"k1": "v1", "k2": "v2"} + assert object_config["Main Section", ParameterGroup]["String In Group", Parameter].value == "evil" + + # name and display name search mixed + assert object_config["root_int"] is object_config["Integer At Root"] + + value_1 = object_config["optional_group"]["Param In Activatable Group"] # type: ignore + value_2 = object_config["Optional Section"]["param"] # type: ignore + assert value_1 is value_2 + + # duplication at different levels + assert object_config["Duplicate", Parameter].value == "hehe" + assert object_config["Main Section", ParameterGroup]["Duplicate", Parameter].value == 44 diff --git a/tests/unit/test_objects_base.py b/tests/unit/test_objects_base.py new file mode 100644 index 00000000..309d467a --- /dev/null +++ b/tests/unit/test_objects_base.py @@ -0,0 +1,39 @@ +from functools import cached_property +from typing import Self + +import pytest + +from adcm_aio_client.core.objects._base import InteractiveObject +from adcm_aio_client.core.types import Endpoint 
+from tests.unit.mocks.requesters import QueueRequester + +pytestmark = [pytest.mark.asyncio] + + +async def test_cache_cleaning(queue_requester: QueueRequester) -> None: + class ObjectA(InteractiveObject): + def get_own_path(self: Self) -> Endpoint: + return "not", "important" + + @property + def plain(self: Self) -> str: + return self._data["name"] + + @cached_property + def complex(self: Self) -> str: + return self._data["name"] + + data_1 = {"id": 4, "name": "awesome"} + data_2 = {"id": 4, "name": "best"} + + instance = ObjectA(requester=queue_requester, data=data_1) + + assert instance.plain == instance.complex + assert instance.complex == data_1["name"] + + queue_requester.queue_responses(data_2) + + await instance.refresh() + + assert instance.plain == instance.complex + assert instance.complex == data_2["name"] diff --git a/tests/unit/test_requesters.py b/tests/unit/test_requesters.py new file mode 100644 index 00000000..b24d4749 --- /dev/null +++ b/tests/unit/test_requesters.py @@ -0,0 +1,127 @@ +from dataclasses import dataclass +from functools import partial +from typing import Any, AsyncGenerator, Self +import json + +from httpx import AsyncClient +import pytest +import pytest_asyncio + +from adcm_aio_client.core.errors import ResponseDataConversionError, UnknownError +from adcm_aio_client.core.requesters import DefaultRequester, HTTPXRequesterResponse +from adcm_aio_client.core.types import RetryPolicy + +pytestmark = [pytest.mark.asyncio] + + +@dataclass() +class HTTPXLikeResponse: + status_code: int = 200 + data: str = "{}" + content: bytes = b"" + + def json(self: Self) -> Any: # noqa: ANN401 + return json.loads(self.data) + + +def build_mock_response(response: HTTPXLikeResponse): # noqa: ANN201 + async def return_response(*a, **kw) -> HTTPXLikeResponse: # noqa: ANN002, ANN003 + _ = a, kw + return response + + return return_response + + +@pytest_asyncio.fixture() +async def httpx_requester() -> AsyncGenerator[DefaultRequester, None]: + retry_policy = RetryPolicy(1, 1) + async with AsyncClient() as dummy_client: + yield DefaultRequester(http_client=dummy_client, retries=retry_policy) + + +@pytest.mark.parametrize( + ("method", "status_code", "call_kwargs"), + [("get", 200, {}), ("post", 201, {"data": {}}), ("patch", 299, {"data": {}}), ("delete", 204, {})], + ids=lambda value: value if not isinstance(value, dict) else "kw", +) +async def test_successful_request( + method: str, status_code: int, call_kwargs: dict, httpx_requester: DefaultRequester, monkeypatch: pytest.MonkeyPatch +) -> None: + requester = httpx_requester + + response = HTTPXLikeResponse(status_code=status_code, data="{}") + return_response = build_mock_response(response) + monkeypatch.setattr(requester.client, "request", return_response) + + result = await getattr(requester, method)(**call_kwargs) + + assert isinstance(result, HTTPXRequesterResponse) + assert result.response is response + assert result.as_dict() == {} + + +async def test_successful_response_data_conversion( + httpx_requester: DefaultRequester, monkeypatch: pytest.MonkeyPatch +) -> None: + requester = httpx_requester + + return_response = build_mock_response(HTTPXLikeResponse(data="{}")) + monkeypatch.setattr(requester.client, "request", return_response) + + response = await requester.get() + assert response.as_dict() == {} + + return_response = build_mock_response(HTTPXLikeResponse(data="[]")) + monkeypatch.setattr(requester.client, "request", return_response) + + response = await requester.delete() + assert response.as_list() == [] + + 
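The same stub-the-transport pattern extends naturally to one-off checks. For instance, a hypothetical extra test (reusing this module's `httpx_requester` fixture and helpers) pinning a single error status, consistent with the parametrized test below where any non-2xx code surfaces as `UnknownError`:

```python
async def test_not_found_raises_unknown_error(
    httpx_requester: DefaultRequester, monkeypatch: pytest.MonkeyPatch
) -> None:
    # stub the client's `request` coroutine with a canned 404
    return_response = build_mock_response(HTTPXLikeResponse(status_code=404, data=""))
    monkeypatch.setattr(httpx_requester.client, "request", return_response)

    # DefaultRequester is expected to wrap the failure rather than return it
    with pytest.raises(UnknownError):
        await httpx_requester.get()
```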
+@pytest.mark.parametrize("status_code", [300, 301, 399, 400, 403, 499, 500, 501, 599]) +async def test_raising_client_error_for_status( + status_code: int, httpx_requester: DefaultRequester, monkeypatch: pytest.MonkeyPatch +) -> None: + requester = httpx_requester + + return_response = build_mock_response(HTTPXLikeResponse(status_code=status_code, data="")) + monkeypatch.setattr(requester.client, "request", return_response) + + for method in ( + partial(requester.get, query={}), + partial(requester.post, data={}), + partial(requester.patch, data={}), + requester.delete, + ): + with pytest.raises(UnknownError): + await method() + + +async def test_response_as_dict_error_on_wrong_type( + httpx_requester: DefaultRequester, monkeypatch: pytest.MonkeyPatch +) -> None: + requester = httpx_requester + + for incorrect_data in ("[]", "{,"): + return_response = build_mock_response(HTTPXLikeResponse(data=incorrect_data)) + monkeypatch.setattr(requester.client, "request", return_response) + + response = await requester.get() + + with pytest.raises(ResponseDataConversionError): + response.as_dict() + + +async def test_response_as_list_error_on_wrong_type( + httpx_requester: DefaultRequester, monkeypatch: pytest.MonkeyPatch +) -> None: + requester = httpx_requester + + for incorrect_data in ("{}", "[,"): + return_response = build_mock_response(HTTPXLikeResponse(data=incorrect_data)) + monkeypatch.setattr(requester.client, "request", return_response) + + response = await requester.get() + + with pytest.raises(ResponseDataConversionError): + response.as_list() diff --git a/tests/unit/utils.py b/tests/unit/utils.py new file mode 100644 index 00000000..e42b7579 --- /dev/null +++ b/tests/unit/utils.py @@ -0,0 +1,14 @@ +from typing import AsyncGenerator + + +async def n_entries_as_list[T](gen: AsyncGenerator[T, None], n: int) -> list[T]: + result = [] + i = 1 + + async for entry in gen: + result.append(entry) + if i == n: + break + i += 1 + + return result
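`n_entries_as_list` consumes only the first `n` items and leaves the generator resumable, which is what lets the accessor tests above read "pages" in stages and assert on the remaining queue between reads. A standalone usage sketch:

```python
import asyncio
from typing import AsyncGenerator

from tests.unit.utils import n_entries_as_list


async def numbers() -> AsyncGenerator[int, None]:
    for i in range(5):
        yield i


async def main() -> None:
    gen = numbers()

    head = await n_entries_as_list(gen, n=2)
    assert head == [0, 1]

    # the generator is only partially consumed; the tail is still available
    assert [i async for i in gen] == [2, 3, 4]


asyncio.run(main())
```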