diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 2b8e38fd..f86c7665 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -7,11 +7,11 @@ Alexander Haas Eileen Kuehn matthias.schnepf ubdsv +Max Kühn Rene Caspart Leon Schuhmacher R. Florian von Cube Benjamin Rottler -Max Kühn Sebastian Wozniewski mschnepf swozniewski diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst index f38db8ae..ba3f6785 100644 --- a/docs/source/changelog.rst +++ b/docs/source/changelog.rst @@ -1,5 +1,5 @@ -.. Created by changelog.py at 2025-07-09, command - '/Users/giffler/.cache/pre-commit/repoecmh3ah8/py_env-python3.12/bin/changelog docs/source/changes compile --categories Added Changed Fixed Security Deprecated --output=docs/source/changelog.rst' +.. Created by changelog.py at 2025-10-01, command + '/Users/giffler/.cache/pre-commit/repoecmh3ah8/py_env-python3.13/bin/changelog docs/source/changes compile --categories Added Changed Fixed Security Deprecated --output=docs/source/changelog.rst' based on the format of 'https://keepachangelog.com/' ######### diff --git a/tardis/adapters/batchsystems/htcondor.py b/tardis/adapters/batchsystems/htcondor.py index 5c0b27f1..3193e01e 100644 --- a/tardis/adapters/batchsystems/htcondor.py +++ b/tardis/adapters/batchsystems/htcondor.py @@ -4,51 +4,178 @@ from ...interfaces.batchsystemadapter import MachineStatus from ...interfaces.executor import Executor from ...utilities.executors.shellexecutor import ShellExecutor -from ...utilities.utils import htcondor_cmd_option_formatter -from ...utilities.utils import csv_parser +from ...utilities.utils import ( + csv_parser, + htcondor_cmd_option_formatter, + htcondor_status_cmd_composer, +) from ...utilities.asynccachemap import AsyncCacheMap from ...utilities.attributedict import AttributeDict +from datetime import datetime, timedelta from functools import partial from shlex import quote +from types import MappingProxyType from typing import Iterable import logging logger = 
logging.getLogger("cobald.runtime.tardis.adapters.batchsystem.htcondor") +async def htcondor_get_collectors( + options: AttributeDict, executor: Executor +) -> list[str]: + """ + Asynchronously retrieve a list of HTCondor collector machine names. + + Runs ``condor_status -collector -af:t Machine`` (plus any additional formatted + options) using the provided executor, then parses the tab-delimited output + to extract and return the list of collector machine names. + + :param options: Additional options for the ``condor_status`` call, such as + ``{'pool': 'htcondor.example'}``, which will be formatted and appended + to the command. + :param executor: Executor used to run the ``condor_status`` command + asynchronously. + :return: List of collector machine names. + """ + class_ads = AttributeDict(Machine="Machine") + # Add collector query option, copy options since it is mutable + options = options | {"collector": None} + cmd = htcondor_status_cmd_composer( + attributes=class_ads, + options=options, + ) + + try: + condor_status = await executor.run_command(cmd) + except CommandExecutionFailure as cef: + logger.warning(f"condor_status could not be executed due to {cef}!") + raise + + return [ + row["Machine"] + for row in csv_parser( + input_csv=condor_status.stdout, + fieldnames=tuple(class_ads.keys()), + delimiter="\t", + ) + ] + + +async def htcondor_get_collector_start_dates( + options: AttributeDict, executor: Executor +) -> dict[str, datetime]: + """ + Asynchronously retrieve the master daemon start times from HTCondor for machines + running a collector daemon as well. Assuming both daemons have a similar start date. + Due to potential bug/feature in HTCondor, the DaemonStartTime of the Collector can + not be used directly. 
+    (see https://www-auth.cs.wisc.edu/lists/htcondor-users/2025-July/msg00092.shtml)
+
+    Runs ``condor_status -master -af:t Machine DaemonStartTime`` (plus any
+    additional formatted options) using the provided executor, and parses
+    the tab-delimited output into a dictionary.
+
+    :param options: Additional options for the ``condor_status`` call, such as
+        ``{'pool': 'htcondor.example'}``, which will be formatted and appended
+        to the command.
+    :param executor: Executor used to run the ``condor_status`` command
+        asynchronously.
+    :return: Dictionary mapping machine names of hosts running a collector to
+        their master daemon start time (as ``datetime`` objects).
+    """
+    class_ads = AttributeDict(Machine="Machine", DaemonStartTime="DaemonStartTime")
+    htcondor_collectors = await htcondor_get_collectors(options, executor)
+
+    # Add master query option, copy options since it is mutable
+    options = options | {"master": None}
+
+    cmd = htcondor_status_cmd_composer(
+        attributes=class_ads,
+        options=options,
+        constraint=" || ".join(f'Machine == "{fqdn}"' for fqdn in htcondor_collectors),
+    )
+
+    try:
+        condor_status = await executor.run_command(cmd)
+    except CommandExecutionFailure as cef:
+        logger.warning(f"condor_status could not be executed due to {cef}!")
+        raise
+
+    return {
+        row["Machine"]: datetime.fromtimestamp(int(row["DaemonStartTime"]))
+        for row in csv_parser(
+            input_csv=condor_status.stdout,
+            fieldnames=tuple(class_ads.keys()),
+            delimiter="\t",
+        )
+        if row["Machine"] in htcondor_collectors
+    }
+
+
 async def htcondor_status_updater(
-    options: AttributeDict, attributes: AttributeDict, executor: Executor
+    options: AttributeDict,
+    attributes: AttributeDict,
+    executor: Executor,
+    cached_data: MappingProxyType,
 ) -> dict:
     """
     Helper function to call ``condor_status -af`` asynchronously and to translate
-    the output into a dictionary
+    the output into a dictionary.
+
+    If the HTCondor Collector has been running for less than 3600 seconds,
+    previously cached status data is used for machines that were already
+    available before the restart; otherwise, fresh status data is used.
 
     :param options: Additional options for the condor_status call. For example
         ``{'pool': 'htcondor.example'}`` will be translated into
         ``condor_status -af ... -pool htcondor.example``
-    :type options: AttributeDict
     :param attributes: Additional fields to add to output of the
         ``condor_status -af`` response.
-    :type attributes: AttributeDict
-    :return: Dictionary containing the output of the ``condor_status`` command
-    :rtype: dict
+    :param executor: Executor to run the ``condor_status`` command asynchronously.
+    :param cached_data: Cached output from previous ``condor_status -af`` call
+    :return: Dictionary containing the processed output of the ``condor_status``
+        command, possibly merged with cached data depending on collector uptime.
     """
+    # copy options, since they are mutable
+    options = AttributeDict(**options)
 
-    attributes_string = f'-af:t {" ".join(attributes.values())}'
+    collector_start_dates = await htcondor_get_collector_start_dates(options, executor)
 
-    options_string = htcondor_cmd_option_formatter(options)
+    threshold = timedelta(seconds=3600)
+    now = datetime.now()
 
-    cmd = f"condor_status {attributes_string} -constraint PartitionableSlot=?=True"
+    earliest_start_date = min(collector_start_dates.values())
+    latest_start_date = max(collector_start_dates.values())
 
-    if options_string:
-        cmd = f"{cmd} {options_string}"
+    if (now - earliest_start_date) < threshold:
+        # If all collectors have been running for less than 3600 seconds,
+        # use cached status for machines that were already available before the
+        # restart and update it with fresh data if available.
+ htcondor_status = cached_data.copy() - htcondor_status = {} + elif (now - latest_start_date) < threshold: + # If any collector has been running for more than 3600 seconds, use the oldest + # available one. + htcondor_status = {} + oldest_collector = min(collector_start_dates, key=collector_start_dates.get) + options.pool = oldest_collector + + else: + # Repopulate HTCondor status + htcondor_status = {} + + cmd = htcondor_status_cmd_composer( + attributes=attributes, + options=options, + constraint="PartitionableSlot=?=True", + ) try: logger.debug(f"HTCondor status update is running. Command: {cmd}") condor_status = await executor.run_command(cmd) + for row in csv_parser( input_csv=condor_status.stdout, fieldnames=tuple(attributes.keys()), @@ -57,7 +184,6 @@ async def htcondor_status_updater( ): status_key = row["TardisDroneUuid"] or row["Machine"].split(".")[0] htcondor_status[status_key] = row - except CommandExecutionFailure as cef: logger.warning(f"condor_status could not be executed due to {cef}!") raise @@ -81,7 +207,7 @@ def __init__(self): try: self.htcondor_options = config.BatchSystem.options except AttributeError: - self.htcondor_options = {} + self.htcondor_options = AttributeDict() attributes = dict( Machine="Machine", @@ -101,6 +227,7 @@ def __init__(self): self._executor, ), max_age=config.BatchSystem.max_age * 60, + provide_cache=True, ) async def disintegrate_machine(self, drone_uuid: str) -> None: @@ -109,7 +236,6 @@ async def disintegrate_machine(self, drone_uuid: str) -> None: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. - :type drone_uuid: str :return: None """ return @@ -121,7 +247,6 @@ async def drain_machine(self, drone_uuid: str) -> None: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. 
- :type drone_uuid: str :return: None """ await self._htcondor_status.update_status() @@ -157,7 +282,6 @@ async def integrate_machine(self, drone_uuid: str) -> None: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. - :type drone_uuid: str :return: None """ return None @@ -170,9 +294,7 @@ async def get_resource_ratios(self, drone_uuid: str) -> Iterable[float]: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. - :type drone_uuid: str :return: Iterable of float containing the ratios - :rtype: Iterable[float] """ await self._htcondor_status.update_status() try: @@ -192,9 +314,7 @@ async def get_allocation(self, drone_uuid: str) -> float: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. - :type drone_uuid: str :return: The allocation of a worker node as described above. - :rtype: float """ return max(await self.get_resource_ratios(drone_uuid), default=0.0) @@ -205,10 +325,8 @@ async def get_machine_status(self, drone_uuid: str) -> MachineStatus: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. - :type drone_uuid: str :return: The machine status in HTCondor (Available, Draining, Drained, NotAvailable) - :rtype: MachineStatus """ status_mapping = { ("Unclaimed", "Idle"): MachineStatus.Available, @@ -236,20 +354,17 @@ async def get_utilisation(self, drone_uuid: str) -> float: :param drone_uuid: Uuid of the worker node, for some sites corresponding to the host name of the drone. - :type drone_uuid: str :return: The utilisation of a worker node as described above. 
- :rtype: float """ return min(await self.get_resource_ratios(drone_uuid), default=0.0) @property def machine_meta_data_translation_mapping(self) -> AttributeDict: """ - The machine meta data translation mapping is used to translate units of - the machine meta data in ``TARDIS`` to values expected by the + The machine metadata translation mapping is used to translate units of + the machine metadata in ``TARDIS`` to values expected by the HTCondor batch system adapter. - :return: Machine meta data translation mapping - :rtype: AttributeDict + :return: Machine metadata translation mapping """ return AttributeDict(Cores=1, Memory=1024, Disk=1024 * 1024) diff --git a/tardis/utilities/asynccachemap.py b/tardis/utilities/asynccachemap.py index d05427c0..1d9e2581 100644 --- a/tardis/utilities/asynccachemap.py +++ b/tardis/utilities/asynccachemap.py @@ -2,6 +2,8 @@ from collections.abc import Mapping from datetime import datetime from datetime import timedelta +from functools import partial +from types import MappingProxyType import asyncio import logging @@ -11,10 +13,16 @@ class AsyncCacheMap(Mapping): - def __init__(self, update_coroutine, max_age: int = 60 * 15): + def __init__( + self, + update_coroutine, + max_age: int = 60 * 15, + provide_cache: bool = False, + ): self._update_coroutine = update_coroutine self._max_age = max_age self._last_update = datetime.fromtimestamp(0) + self._provide_cache = provide_cache self._data = {} self._lock = None @@ -26,17 +34,27 @@ def _async_lock(self): self._lock = asyncio.Lock() return self._lock + @property + def read_only_cache(self): + return MappingProxyType(self._data) + @property def last_update(self) -> datetime: return self._last_update + @property + def update_coroutine(self): + if self._provide_cache: + return partial(self._update_coroutine, self.read_only_cache) + return self._update_coroutine + async def update_status(self) -> None: current_time = datetime.now() async with self._async_lock: if (current_time - 
self._last_update) > timedelta(seconds=self._max_age): try: - data = await self._update_coroutine() + data = await self.update_coroutine() except json.decoder.JSONDecodeError as je: logger.warning( f"AsyncMap update_status failed: Could not decode json {je}" @@ -57,13 +75,4 @@ def __len__(self): return len(self._data) def __eq__(self, other): - if not isinstance(other, AsyncCacheMap): - return False - - return ( - self._update_coroutine == other._update_coroutine - and self._max_age == other._max_age - and self._last_update == other._last_update - and self._data == other._data - and self._lock == other._lock - ) + return self is other diff --git a/tardis/utilities/attributedict.py b/tardis/utilities/attributedict.py index 755e7eac..5c25dad1 100644 --- a/tardis/utilities/attributedict.py +++ b/tardis/utilities/attributedict.py @@ -28,3 +28,6 @@ def __delattr__(self, item): raise AttributeError( f"{item} is not a valid attribute. Dict contains {str(self)}." ) from None + + def __or__(self, other): + return AttributeDict({**self, **other}) diff --git a/tardis/utilities/utils.py b/tardis/utilities/utils.py index a41f5246..c19d762c 100644 --- a/tardis/utilities/utils.py +++ b/tardis/utilities/utils.py @@ -2,7 +2,7 @@ from contextlib import contextmanager from io import StringIO -from typing import Any, Callable, Dict, List, TypeVar, Tuple +from typing import Any, Callable, Dict, Optional, TypeVar, Union import csv @@ -24,11 +24,43 @@ def htcondor_cmd_option_formatter(options: AttributeDict) -> str: return cmd_option_formatter(options, prefix="-", separator=" ") +def htcondor_status_cmd_composer( + attributes: AttributeDict, + options: Optional[AttributeDict] = None, + constraint: Optional[str] = None, +) -> str: + """ + Composes an `condor_status` command string from attributes (classads), options, + and an optional constraint. This function does not execute the command, + it only returns the assembled command string. 
+ + :param attributes: Mapping of attribute names to values, used to construct + the `-af:t` argument. + :param options: Additional HTCondor command-line options, formatted by + `htcondor_cmd_option_formatter`. + :param constraint: Constraint expression to filter results + (e.g., "PartitionableSlot==True"). + :return: Fully assembled `condor_status` command string. + """ + attributes_string = f'-af:t {" ".join(attributes.values())}' # noqa: E231 + + cmd = f"condor_status {attributes_string}" + + if constraint: + cmd = f"{cmd} -constraint '{constraint}'" + + if options: + options_string = htcondor_cmd_option_formatter(options) + cmd = f"{cmd} {options_string}" + + return cmd + + def csv_parser( input_csv: str, - fieldnames: [List, Tuple], + fieldnames: Union[list[str], tuple[str, ...]], delimiter: str = "\t", - replacements: dict = None, + replacements: Optional[dict[str, Any]] = None, skipinitialspace: bool = False, skiptrailingspace: bool = False, ): @@ -36,17 +68,11 @@ def csv_parser( Parses CSV formatted input :param input_csv: CSV formatted input - :type input_csv: str :param fieldnames: corresponding field names - :type fieldnames: [List, Tuple] :param delimiter: delimiter between entries - :type delimiter: str :param replacements: fields to be replaced - :type replacements: dict :param skipinitialspace: ignore whitespace immediately following the delimiter - :type skipinitialspace: bool :param skiptrailingspace: ignore whitespace at the end of each csv row - :type skiptrailingspace: bool """ if skiptrailingspace: input_csv = "\n".join((line.strip() for line in input_csv.splitlines())) @@ -89,7 +115,6 @@ def machine_meta_data_translation( :param meta_data_translation_mapping: Map used for the translation of meta data, contains conversion factors :return: Converted meta data with units expected by the OBS - :rtype: dict """ try: return { diff --git a/tests/adapters_t/batchsystems_t/test_htcondor.py b/tests/adapters_t/batchsystems_t/test_htcondor.py index 
88e6e494..a51cd09e 100644 --- a/tests/adapters_t/batchsystems_t/test_htcondor.py +++ b/tests/adapters_t/batchsystems_t/test_htcondor.py @@ -1,21 +1,30 @@ from tests.utilities.utilities import run_async -from tests.utilities.utilities import mock_executor_run_command +from tests.utilities.utilities import mock_executor_run_command_new + from tardis.adapters.batchsystems.htcondor import HTCondorAdapter -from tardis.adapters.batchsystems.htcondor import htcondor_status_updater +from tardis.adapters.batchsystems.htcondor import ( + htcondor_status_updater, + htcondor_get_collectors, + htcondor_get_collector_start_dates, +) from tardis.interfaces.batchsystemadapter import MachineStatus from tardis.exceptions.executorexceptions import CommandExecutionFailure from tardis.utilities.attributedict import AttributeDict +from datetime import datetime from functools import partial from shlex import quote +from types import MappingProxyType from unittest.mock import patch from unittest import TestCase import logging +NOW = int(datetime.now().timestamp()) + CPU_RATIO = 0.9 MEMORY_RATIO = 0.8 -CONDOR_RETURN = "\n".join( +CONDOR_STATUS_RETURN = "\n".join( [ f"test\tslot1@test\tUnclaimed\tIdle\tundefined\t{CPU_RATIO}\t{MEMORY_RATIO}", # noqa: B950 f"test_drain\tslot1@test\tDrained\tRetiring\tundefined\t{CPU_RATIO}\t{MEMORY_RATIO}", # noqa: B950 @@ -28,6 +37,117 @@ ] ) +CONDOR_STATUS_RETURN_DICT = { + "test": { + "Machine": "test", + "Name": "slot1@test", + "State": "Unclaimed", + "Activity": "Idle", + "TardisDroneUuid": None, + "cpu_ratio": "0.9", + "memory_ratio": "0.8", + }, + "test_drain": { + "Machine": "test_drain", + "Name": "slot1@test", + "State": "Drained", + "Activity": "Retiring", + "TardisDroneUuid": None, + "cpu_ratio": "0.9", + "memory_ratio": "0.8", + }, + "test_drained": { + "Machine": "test_drained", + "Name": "slot1@test", + "State": "Drained", + "Activity": "Idle", + "TardisDroneUuid": None, + "cpu_ratio": "0.9", + "memory_ratio": "0.8", + }, + "test_owner": { + 
"Machine": "test_owner", + "Name": "slot1@test", + "State": "Owner", + "Activity": "Idle", + "TardisDroneUuid": None, + "cpu_ratio": "0.9", + "memory_ratio": "0.8", + }, + "test_uuid": { + "Machine": "test_uuid_plus", + "Name": "slot1@test_uuid@test", + "State": "Unclaimed", + "Activity": "Idle", + "TardisDroneUuid": "test_uuid", + "cpu_ratio": "0.9", + "memory_ratio": "0.8", + }, + "test_undefined": { + "Machine": "test_undefined", + "Name": "slot1@test", + "State": "Unclaimed", + "Activity": "Idle", + "TardisDroneUuid": None, + "cpu_ratio": None, + "memory_ratio": "0.8", + }, + "test_error": { + "Machine": "test_error", + "Name": "slot1@test", + "State": "Unclaimed", + "Activity": "Idle", + "TardisDroneUuid": None, + "cpu_ratio": "error", + "memory_ratio": "0.8", + }, + "exoscale-26d361290f": { + "Machine": "exoscale-26d361290f", + "Name": "slot1@exoscale-26d361290f", + "State": "Unclaimed", + "Activity": "Idle", + "TardisDroneUuid": None, + "cpu_ratio": "0.125", + "memory_ratio": "0.125", + }, +} + +CONDOR_STATUS_GRACEFUL_RETURN = ( + "" # worst case no resources are displayed after collector restart +) + +CONDOR_COLLECTOR_STATUS_RETURN = """ +cloud-htcondor-rhel8.gridka.de +cloud-htcondor.gridka.de +""" + +CONDOR_MASTER_STATUS_RETURN = """ +cloud-htcondor-ce-1-kit.gridka.de\t1744879933 +cloud-htcondor-ce-2-kit.gridka.de\t1745338074 +cloud-htcondor-ce-3-kit.gridka.de\t1750838558 +cloud-htcondor-rhel8.gridka.de\t1753949919 +cloud-htcondor.gridka.de\t1753947411 +cloud-tardis.gridka.de\t1753949555 +""" + +CONDOR_MASTER_STATUS_OLDEST_RETURN = f""" +cloud-htcondor-ce-1-kit.gridka.de\t1744879933 +cloud-htcondor-ce-2-kit.gridka.de\t1745338074 +cloud-htcondor-ce-3-kit.gridka.de\t1750838558 +cloud-htcondor-rhel8.gridka.de\t1753949919 +cloud-htcondor.gridka.de\t{NOW} +cloud-tardis.gridka.de\t1753949555 +""" + +CONDOR_MASTER_STATUS_GRACEFUL_RETURN = f""" +cloud-htcondor-ce-1-kit.gridka.de\t1744879933 +cloud-htcondor-ce-2-kit.gridka.de\t1745338074 
+cloud-htcondor-ce-3-kit.gridka.de\t1750838558 +cloud-htcondor-rhel8.gridka.de\t{NOW} +cloud-htcondor.gridka.de\t{NOW} +cloud-tardis.gridka.de\t1753949555 +""" + class TestHTCondorAdapter(TestCase): mock_config_patcher = None @@ -55,14 +175,14 @@ def setUp(self): self.command = ( "condor_status -af:t Machine Name State Activity TardisDroneUuid " "'Real(TotalSlotCpus-Cpus)/TotalSlotCpus' " - "'Real(TotalSlotMemory-Memory)/TotalSlotMemory' -constraint PartitionableSlot=?=True" # noqa: B950 + "'Real(TotalSlotMemory-Memory)/TotalSlotMemory' -constraint 'PartitionableSlot=?=True'" # noqa: B950 " -pool my-htcondor.local -test" ) self.command_wo_options = ( "condor_status -af:t Machine Name State Activity TardisDroneUuid " "'Real(TotalSlotCpus-Cpus)/TotalSlotCpus' " - "'Real(TotalSlotMemory-Memory)/TotalSlotMemory' -constraint PartitionableSlot=?=True" # noqa: B950 + "'Real(TotalSlotMemory-Memory)/TotalSlotMemory' -constraint 'PartitionableSlot=?=True'" # noqa: B950 ) self.setup_config_mock(options={"pool": "my-htcondor.local", "test": None}) @@ -90,7 +210,37 @@ def test_disintegrate_machine(self): run_async(self.htcondor_adapter.disintegrate_machine, drone_uuid="test") ) - @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + AttributeDict( + stdout="", stderr="", exit_code=0 + ), # 1st call of drain_machine + AttributeDict( + stdout="", stderr="", exit_code=0 + ), # 2nd call of drain_machine + CommandExecutionFailure( + message="Does not exists", + exit_code=1, + stderr="Does not exists", + stdout="Does not exists", + ), # test exit code 1: HTCondor can't connect to StartD of Drone + 
CommandExecutionFailure( + message="Unhandled error", + exit_code=2, + stderr="Unhandled error", + stdout="Unhandled error", + ), # test arbitrary exit code, should lead to re-raised exception + ] + ) def test_drain_machine(self): run_async(self.htcondor_adapter.drain_machine, drone_uuid="test") self.mock_executor.return_value.run_command.assert_called_with( @@ -103,30 +253,22 @@ def test_drain_machine(self): self.mock_executor.return_value.run_command.assert_called_with( "condor_drain -pool my-htcondor.local -test -graceful slot1@test_uuid@test" ) - self.assertIsNone( + + self.mock_executor.reset_mock() + + self.assertIsNone( # should not run self._executor.run_command(cmd) run_async(self.htcondor_adapter.drain_machine, drone_uuid="not_exists") ) - self.mock_executor.return_value.run_command.side_effect = ( - CommandExecutionFailure( - message="Does not exists", - exit_code=1, - stderr="Does not exists", - stdout="Does not exists", - ) - ) + self.mock_executor.return_value.run_command.assert_not_called() + + self.mock_executor.reset_mock() + with self.assertLogs(level=logging.WARNING): self.assertIsNone( run_async(self.htcondor_adapter.drain_machine, drone_uuid="test") ) - self.mock_executor.return_value.run_command.side_effect = ( - CommandExecutionFailure( - message="Unhandled error", - exit_code=2, - stderr="Unhandled error", - stdout="Unhandled error", - ) - ) + self.mock_executor.reset_mock() with self.assertRaises(CommandExecutionFailure): with self.assertLogs(level=logging.CRITICAL): @@ -134,9 +276,25 @@ def test_drain_machine(self): run_async(self.htcondor_adapter.drain_machine, drone_uuid="test") ) - self.mock_executor.return_value.run_command.side_effect = None - - # @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # 
call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + AttributeDict( + stdout="", stderr="", exit_code=0 + ), # 1st call of drain_machine + AttributeDict( + stdout="", stderr="", exit_code=0 + ), # 2nd call of drain_machine + ] + ) def test_drain_machine_without_options(self): self.setup_config_mock() self.htcondor_adapter = HTCondorAdapter() @@ -158,7 +316,19 @@ def test_integrate_machine(self): run_async(self.htcondor_adapter.integrate_machine, drone_uuid="test") ) - @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) def test_get_resource_ratios(self): self.assertCountEqual( list( @@ -194,7 +364,19 @@ def test_get_resource_ratios(self): [], ) - @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) def test_get_resource_ratios_without_options(self): self.setup_config_mock() del self.config.BatchSystem.options @@ -211,7 +393,19 @@ def test_get_resource_ratios_without_options(self): self.command_wo_options ) - @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # 
call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) def test_get_allocation(self): self.assertEqual( run_async(self.htcondor_adapter.get_allocation, drone_uuid="test"), @@ -219,7 +413,19 @@ def test_get_allocation(self): ) self.mock_executor.return_value.run_command.assert_called_with(self.command) - @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) def test_get_machine_status(self): self.assertEqual( run_async(self.htcondor_adapter.get_machine_status, drone_uuid="test"), @@ -227,6 +433,7 @@ def test_get_machine_status(self): ) self.mock_executor.return_value.run_command.assert_called_with(self.command) self.mock_executor.reset_mock() + self.assertEqual( run_async( self.htcondor_adapter.get_machine_status, drone_uuid="not_exists" @@ -234,6 +441,7 @@ def test_get_machine_status(self): MachineStatus.NotAvailable, ) self.mock_executor.reset_mock() + self.assertEqual( run_async( self.htcondor_adapter.get_machine_status, drone_uuid="test_drain" @@ -241,6 +449,7 @@ def test_get_machine_status(self): MachineStatus.Draining, ) self.mock_executor.reset_mock() + self.assertEqual( run_async( self.htcondor_adapter.get_machine_status, drone_uuid="test_drained" @@ -248,6 +457,7 @@ def test_get_machine_status(self): MachineStatus.Drained, ) self.mock_executor.reset_mock() + self.assertEqual( run_async( self.htcondor_adapter.get_machine_status, 
drone_uuid="test_owner" @@ -260,14 +470,112 @@ def test_get_machine_status(self): run_async(self.htcondor_adapter.get_machine_status, drone_uuid="test_uuid"), MachineStatus.Available, ) - self.mock_executor.reset_mock() - self.mock_executor.return_value.run_command.side_effect = ( - CommandExecutionFailure( - message="Test", exit_code=123, stderr="Test", stdout="Test" - ) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) + def test_htcondor_status_updater(self): + attributes = { + "Machine": "Machine", + "Name": "Name", + "State": "State", + "Activity": "Activity", + "TardisDroneUuid": "TardisDroneUuid", + } + # Escape htcondor expressions and add them to attributes + attributes.update( + {key: quote(value) for key, value in self.config.BatchSystem.ratios.items()} + ) + + ro_cached_data = MappingProxyType({}) + + self.assertDictEqual( + CONDOR_STATUS_RETURN_DICT, + run_async( + partial( + htcondor_status_updater, + self.config.BatchSystem.options, + attributes, + self.mock_executor.return_value, + ro_cached_data, + ) + ), + ) + + # cache should be empty on first access + self.assertDictEqual(dict(ro_cached_data), {}) + + self.mock_executor.return_value.run_command.assert_called_with(self.command) + + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_GRACEFUL_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_GRACEFUL_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) + def 
test_htcondor_status_updater_graceful(self): + attributes = { + "Machine": "Machine", + "Name": "Name", + "State": "State", + "Activity": "Activity", + "TardisDroneUuid": "TardisDroneUuid", + } + # Escape htcondor expressions and add them to attributes + attributes.update( + {key: quote(value) for key, value in self.config.BatchSystem.ratios.items()} + ) + + # Populate cache with expected results + ro_cached_data = MappingProxyType(CONDOR_STATUS_RETURN_DICT) + + # check that no resources have been deleted and cached data is used + self.assertDictEqual( + CONDOR_STATUS_RETURN_DICT, + run_async( + partial( + htcondor_status_updater, + self.config.BatchSystem.options, + attributes, + self.mock_executor.return_value, + ro_cached_data, + ) + ), ) + self.mock_executor.return_value.run_command.assert_called_with(self.command) + + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_OLDEST_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) + def test_htcondor_status_updater_oldest(self): attributes = { "Machine": "Machine", "Name": "Name", @@ -279,6 +587,58 @@ def test_get_machine_status(self): attributes.update( {key: quote(value) for key, value in self.config.BatchSystem.ratios.items()} ) + + ro_cached_data = MappingProxyType({}) + + # check that no resources have been deleted and cached data is used + self.assertDictEqual( + CONDOR_STATUS_RETURN_DICT, + run_async( + partial( + htcondor_status_updater, + self.config.BatchSystem.options, + attributes, + self.mock_executor.return_value, + ro_cached_data, + ) + ), + ) + + self.mock_executor.return_value.run_command.assert_called_with( + self.command.replace( + "my-htcondor.local", "cloud-htcondor-rhel8.gridka.de" + ) # should query 
the oldest collector using -pool + ) + + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + CommandExecutionFailure( + message="Test", exit_code=123, stderr="Test", stdout="Test" + ), # test handling and logging when CommandExecutionFailure is raised + ] + ) + def test_htcondor_status_updater_cef(self): + # test handling and logging when CommandExecutionFailure is raised + attributes = { + "Machine": "Machine", + "Name": "Name", + "State": "State", + "Activity": "Activity", + "TardisDroneUuid": "TardisDroneUuid", + } + # Escape htcondor expressions and add them to attributes + attributes.update( + {key: quote(value) for key, value in self.config.BatchSystem.ratios.items()} + ) + + ro_cached_data = MappingProxyType({}) + with self.assertLogs(level=logging.WARNING): with self.assertRaises(CommandExecutionFailure): run_async( @@ -287,12 +647,24 @@ def test_get_machine_status(self): self.config.BatchSystem.options, attributes, self.mock_executor.return_value, + ro_cached_data, ) ) self.mock_executor.return_value.run_command.assert_called_with(self.command) - self.mock_executor.return_value.run_command.side_effect = None - @mock_executor_run_command(stdout=CONDOR_RETURN) + @mock_executor_run_command_new( + [ + AttributeDict( + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collectors + AttributeDict( + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_get_collector_start_dates + AttributeDict( + stdout=CONDOR_STATUS_RETURN, stderr="", exit_code=0 + ), # call in htcondor_status_updater + ] + ) def test_get_utilisation(self): self.assertEqual( run_async(self.htcondor_adapter.get_utilisation, drone_uuid="test"), @@ -305,3 +677,219 @@ def 
test_machine_meta_data_translation_mapping(self): AttributeDict(Cores=1, Memory=1024, Disk=1024 * 1024), self.htcondor_adapter.machine_meta_data_translation_mapping, ) + + @mock_executor_run_command_new( + [ + AttributeDict( # for call in htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + ] + ) + def test_htcondor_get_collectors(self): + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + result = run_async(htcondor_get_collectors, options, executor) + + # Expected: split collector names by newline and strip empty lines + expected = [ + "cloud-htcondor-rhel8.gridka.de", + "cloud-htcondor.gridka.de", + ] + self.assertEqual(result, expected) + + # Command string should match expected with options + self.mock_executor.return_value.run_command.assert_called_with( + "condor_status -af:t Machine -pool my-htcondor.local -test -collector" + ) + + @mock_executor_run_command_new( + [ + AttributeDict( # for htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + ] + ) + def test_htcondor_get_collectors_without_options(self): + # Remove options entirely to simulate default behavior + self.setup_config_mock() + + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + result = run_async(htcondor_get_collectors, options, executor) + + expected = [ + "cloud-htcondor-rhel8.gridka.de", + "cloud-htcondor.gridka.de", + ] + self.assertEqual(result, expected) + + self.mock_executor.return_value.run_command.assert_called_with( + "condor_status -af:t Machine -collector" + ) + + @mock_executor_run_command_new( + [ + CommandExecutionFailure( # simulate failure in htcondor_get_collectors + message="Collector not reachable", + exit_code=1, + stderr="Collector not reachable", + stdout="", + ), + ] + ) + def test_htcondor_get_collectors_failure(self): + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value 
+ + with self.assertLogs(level=logging.WARNING): + with self.assertRaises(CommandExecutionFailure): + run_async(htcondor_get_collectors, options, executor) + + @mock_executor_run_command_new( + [ + AttributeDict( # call in htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + AttributeDict( # call in htcondor_get_collector_start_dates + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), + ] + ) + def test_htcondor_get_collector_start_dates(self): + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + result = run_async(htcondor_get_collector_start_dates, options, executor) + + # We expect only machines from CONDOR_COLLECTOR_STATUS_RETURN to be included + expected_times = { + "cloud-htcondor-rhel8.gridka.de": datetime.fromtimestamp(1753949919), + "cloud-htcondor.gridka.de": datetime.fromtimestamp(1753947411), + } + self.assertEqual(result, expected_times) + + # Ensure both commands were called with proper formatting + self.mock_executor.return_value.run_command.assert_any_call( + "condor_status -af:t Machine -pool my-htcondor.local -test -collector" + ) + self.mock_executor.return_value.run_command.assert_any_call( + 'condor_status -af:t Machine DaemonStartTime -constraint \'Machine == "cloud-htcondor-rhel8.gridka.de" || Machine == "cloud-htcondor.gridka.de"\' -pool my-htcondor.local -test -master' # noqa B950 + ) + + @mock_executor_run_command_new( + [ + AttributeDict( # call in htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + AttributeDict( # call in htcondor_get_collector_start_dates + stdout=CONDOR_MASTER_STATUS_RETURN, stderr="", exit_code=0 + ), + ] + ) + def test_htcondor_get_collector_start_dates_without_options(self): + self.setup_config_mock() + + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + result = 
run_async(htcondor_get_collector_start_dates, options, executor) + + expected_times = { + "cloud-htcondor-rhel8.gridka.de": datetime.fromtimestamp(1753949919), + "cloud-htcondor.gridka.de": datetime.fromtimestamp(1753947411), + } + self.assertEqual(result, expected_times) + + self.mock_executor.return_value.run_command.assert_any_call( + "condor_status -af:t Machine -collector" + ) + self.mock_executor.return_value.run_command.assert_any_call( + 'condor_status -af:t Machine DaemonStartTime -constraint \'Machine == "cloud-htcondor-rhel8.gridka.de" || Machine == "cloud-htcondor.gridka.de"\' -master' # noqa B950 + ) + + @mock_executor_run_command_new( + [ + AttributeDict( # call in htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + AttributeDict( # call in htcondor_get_collector_start_dates + stdout=CONDOR_MASTER_STATUS_GRACEFUL_RETURN, stderr="", exit_code=0 + ), + ] + ) + def test_htcondor_get_collector_start_dates_graceful(self): + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + result = run_async(htcondor_get_collector_start_dates, options, executor) + + # We expect only machines from CONDOR_COLLECTOR_STATUS_RETURN to be included + datetime_now = datetime.fromtimestamp(NOW) + expected_times = { + "cloud-htcondor-rhel8.gridka.de": datetime_now, + "cloud-htcondor.gridka.de": datetime_now, + } + self.assertEqual(result, expected_times) + + # Ensure both commands were called with proper formatting + self.mock_executor.return_value.run_command.assert_any_call( + "condor_status -af:t Machine -pool my-htcondor.local -test -collector" + ) + self.mock_executor.return_value.run_command.assert_any_call( + 'condor_status -af:t Machine DaemonStartTime -constraint \'Machine == "cloud-htcondor-rhel8.gridka.de" || Machine == "cloud-htcondor.gridka.de"\' -pool my-htcondor.local -test -master' # noqa B950 + ) + + @mock_executor_run_command_new( + [ + AttributeDict( # call in 
htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + AttributeDict( # call in htcondor_get_collector_start_dates + stdout=CONDOR_MASTER_STATUS_OLDEST_RETURN, stderr="", exit_code=0 + ), + ] + ) + def test_htcondor_get_collector_start_dates_oldest(self): + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + result = run_async(htcondor_get_collector_start_dates, options, executor) + + # We expect only machines from CONDOR_COLLECTOR_STATUS_RETURN to be included + expected_times = { + "cloud-htcondor-rhel8.gridka.de": datetime.fromtimestamp(1753949919), + "cloud-htcondor.gridka.de": datetime.fromtimestamp(NOW), + } + self.assertEqual(result, expected_times) + + # Ensure both commands were called with proper formatting + self.mock_executor.return_value.run_command.assert_any_call( + "condor_status -af:t Machine -pool my-htcondor.local -test -collector" + ) + self.mock_executor.return_value.run_command.assert_any_call( + 'condor_status -af:t Machine DaemonStartTime -constraint \'Machine == "cloud-htcondor-rhel8.gridka.de" || Machine == "cloud-htcondor.gridka.de"\' -pool my-htcondor.local -test -master' # noqa B950 + ) + + @mock_executor_run_command_new( + [ + AttributeDict( # call in htcondor_get_collectors + stdout=CONDOR_COLLECTOR_STATUS_RETURN, stderr="", exit_code=0 + ), + CommandExecutionFailure( + # failure in htcondor_get_collector_start_dates after collectors found + message="Master not reachable", + exit_code=1, + stderr="Master not reachable", + stdout="", + ), + ] + ) + def test_htcondor_get_collector_start_dates_failure_after_collectors(self): + options = self.config.BatchSystem.options + executor = self.mock_executor.return_value + + with self.assertLogs(level=logging.WARNING): + with self.assertRaises(CommandExecutionFailure): + run_async(htcondor_get_collector_start_dates, options, executor) diff --git a/tests/utilities/utilities.py b/tests/utilities/utilities.py index 
c5373bd4..c976dd3c 100644 --- a/tests/utilities/utilities.py +++ b/tests/utilities/utilities.py @@ -1,5 +1,7 @@ from tardis.utilities.attributedict import AttributeDict +from unittest.mock import AsyncMock +from typing import Union import asyncio import socket @@ -19,6 +21,21 @@ def get_free_port(ip: str): # from https://gist.github.com/dbrgn/3979133 return port +def mock_executor_run_command_new( + mock_call_side_effects: list[Union[AttributeDict, Exception]], +): + def decorator(func): + def wrapper(self): + executor = self.mock_executor.return_value + executor.run_command = AsyncMock(side_effect=mock_call_side_effects) + func(self) + executor.run_command = AsyncMock() + + return wrapper + + return decorator + + def mock_executor_run_command(stdout, stderr="", exit_code=0, raise_exception=None): def decorator(func): def wrapper(self): diff --git a/tests/utilities_t/test_asynccachemap.py b/tests/utilities_t/test_asynccachemap.py index 249cf918..b354bc08 100644 --- a/tests/utilities_t/test_asynccachemap.py +++ b/tests/utilities_t/test_asynccachemap.py @@ -6,6 +6,8 @@ from json.decoder import JSONDecodeError from datetime import datetime from datetime import timedelta +from functools import partial +from types import MappingProxyType from unittest import TestCase import logging @@ -72,9 +74,7 @@ def test_last_update(self): ) def test_eq_async_cache_map(self): - test_cache_map = AsyncCacheMap( - update_coroutine=self.async_cache_map._update_coroutine - ) + test_cache_map = self.async_cache_map # Since both objects have been recently initialized, all values (self._max_age, # self._last_update, self._data and self._lock) are still the defaults self.assertTrue(self.async_cache_map == test_cache_map) @@ -82,21 +82,39 @@ def test_eq_async_cache_map(self): # Test the opposite self.assertFalse(self.async_cache_map != test_cache_map) - # change default values - run_async(self.async_cache_map.update_status) - self.assertFalse(self.async_cache_map == test_cache_map) - - # 
update default values, self._last_update, self._lock still differ - run_async(test_cache_map.update_status) - self.assertFalse(self.async_cache_map == test_cache_map) - - # Assimilate lock, self._last_update still differs - test_cache_map._lock = self.async_cache_map._lock - self.assertFalse(self.async_cache_map == test_cache_map) - - # Make them equal again - test_cache_map._last_update = self.async_cache_map._last_update - self.assertTrue(self.async_cache_map == test_cache_map) - - # Test different class - self.assertFalse(self.async_cache_map == self.test_data) + def test_read_only_cache_returns_mappingproxy(self): + run_async(self.async_cache_map.update_status) # populate data + ro_cache = self.async_cache_map.read_only_cache + self.assertIsInstance(ro_cache, MappingProxyType) + self.assertEqual(dict(ro_cache), self.async_cache_map._data) + + # Update _data manually and check if read_only_cache reflects changes + self.async_cache_map._data["new_key"] = "new_value" + self.assertEqual(ro_cache["new_key"], "new_value") + + def test_read_only_cache_is_immutable(self): + ro_cache = self.async_cache_map.read_only_cache + with self.assertRaises(TypeError): + ro_cache["key"] = "value" # Attempt to modify should raise TypeError + + with self.assertRaises(TypeError): + del ro_cache["key"] # Attempt to delete should raise TypeError + + def test_update_coroutine_returns_original_when_flag_false(self): + # Ensure the flag is False + self.assertFalse(self.async_cache_map._provide_cache) + coro = self.async_cache_map.update_coroutine + self.assertEqual(coro, self.async_cache_map._update_coroutine) + + def test_update_coroutine_returns_partial_when_flag_true(self): + # Set the flag to True + self.async_cache_map._provide_cache = True + coro = self.async_cache_map.update_coroutine + self.assertIsInstance(coro, partial) + + # The partial should wrap the original coroutine + self.assertEqual(coro.func, self.async_cache_map._update_coroutine) + + # The first argument passed should 
be the read-only cache + self.assertIsInstance(coro.args[0], MappingProxyType) + self.assertEqual(coro.args[0], self.async_cache_map.read_only_cache) diff --git a/tests/utilities_t/test_attributedict.py b/tests/utilities_t/test_attributedict.py index 5d256d1f..1ad947f8 100644 --- a/tests/utilities_t/test_attributedict.py +++ b/tests/utilities_t/test_attributedict.py @@ -41,3 +41,25 @@ def test_del_via_attribute(self): with self.assertRaises(AttributeError): del self.test_dictionary.another_test + + def test_or_with_dict(self): + other = {"new": 42} + merged = self.test_dictionary | other + self.assertIsInstance(merged, AttributeDict) + self.assertEqual(merged["test"], 1) + self.assertEqual(merged["another_test"], 2) + self.assertEqual(merged["new"], 42) + + def test_or_with_dict_overwrites(self): + other = {"test": 99} + merged = self.test_dictionary | other + self.assertEqual(merged["test"], 99) # overwritten + self.assertEqual(merged["another_test"], 2) + + def test_or_with_attributedict(self): + other = AttributeDict(extra=123) + merged = self.test_dictionary | other + self.assertIsInstance(merged, AttributeDict) + self.assertEqual(merged["test"], 1) + self.assertEqual(merged["another_test"], 2) + self.assertEqual(merged["extra"], 123) diff --git a/tests/utilities_t/test_utils.py b/tests/utilities_t/test_utils.py index 185b6902..0b0ca3aa 100644 --- a/tests/utilities_t/test_utils.py +++ b/tests/utilities_t/test_utils.py @@ -8,6 +8,7 @@ disable_logging, drone_environment_to_str, htcondor_cmd_option_formatter, + htcondor_status_cmd_composer, load_states, submit_cmd_option_formatter, ) @@ -87,6 +88,35 @@ def test_htcondor_cmd_option_formatter(self): self.assertEqual(option_string, "") +class TestHTCondorStatusCmdComposer(TestCase): + def test_with_all_arguments(self): + attributes = AttributeDict(Machine="Machine", State="State") + options = AttributeDict(pool="my_pool", test=None) + constraint = "PartitionableSlot==True" + + result = 
htcondor_status_cmd_composer(attributes, options, constraint) + + self.assertEqual( + result, + "condor_status -af:t Machine State -constraint 'PartitionableSlot==True' -pool my_pool -test", # noqa B950 + ) + + def test_without_constraint_and_options(self): + attributes = AttributeDict(Machine="Machine", State="State") + + result = htcondor_status_cmd_composer(attributes) + + self.assertEqual(result, "condor_status -af:t Machine State") + + def test_with_only_options(self): + attributes = AttributeDict(Machine="Machine", State="State") + options = AttributeDict(pool="my_pool") + + result = htcondor_status_cmd_composer(attributes, options) + + self.assertEqual(result, "condor_status -af:t Machine State -pool my_pool") + + class TestCSVParser(TestCase): def test_csv_parser_htcondor(self): htcondor_input = "\n".join(