From bc5d714725d1a84f1bc849489fc90672df37bf73 Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Wed, 20 Aug 2025 14:27:18 -0300 Subject: [PATCH 1/6] please new linting rules and libs bump --- lib/charms/mysql/v0/async_replication.py | 67 +- lib/charms/mysql/v0/backups.py | 46 +- lib/charms/mysql/v0/mysql.py | 695 +++++++++++------- lib/charms/mysql/v0/s3_helpers.py | 19 +- lib/charms/mysql/v0/tls.py | 21 +- poetry.lock | 284 +++---- pyproject.toml | 37 +- scripts/log_rotate_dispatcher.py | 4 +- src/charm.py | 36 +- src/config.py | 4 +- src/constants.py | 11 +- src/k8s_helpers.py | 18 +- src/log_rotate_manager.py | 2 +- src/mysql_k8s_helpers.py | 37 +- src/relations/mysql.py | 27 +- src/relations/mysql_root.py | 28 +- src/upgrade.py | 20 +- src/utils.py | 4 +- tests/integration/backups.py | 24 +- tests/integration/helpers.py | 40 +- .../high_availability_helpers.py | 28 +- .../test_async_replication.py | 18 +- .../high_availability/test_k8s_endpoints.py | 12 +- .../high_availability/test_log_rotation.py | 6 +- .../high_availability/test_node_drain.py | 12 +- .../test_replication_reelection.py | 6 +- .../test_replication_scaling.py | 12 +- .../test_self_healing_network_cut.py | 36 +- .../test_self_healing_process_frozen.py | 30 +- .../test_self_healing_process_killed.py | 24 +- .../test_self_healing_stop_all.py | 6 +- .../test_self_healing_stop_primary.py | 18 +- .../high_availability/test_upgrade.py | 10 +- .../test_upgrade_rollback_incompat.py | 6 +- tests/integration/test_backup_aws.py | 6 +- tests/integration/test_backup_ceph.py | 6 +- tests/integration/test_backup_gcp.py | 6 +- tests/integration/test_charm.py | 6 +- tests/integration/test_tls.py | 64 +- tests/unit/test_database.py | 2 +- 40 files changed, 970 insertions(+), 768 deletions(-) diff --git a/lib/charms/mysql/v0/async_replication.py b/lib/charms/mysql/v0/async_replication.py index 278b17307a..8830bbcee2 100644 --- a/lib/charms/mysql/v0/async_replication.py +++ 
b/lib/charms/mysql/v0/async_replication.py @@ -14,6 +14,19 @@ MySQLPromoteClusterToPrimaryError, MySQLRejoinClusterError, ) +from constants import ( + BACKUPS_PASSWORD_KEY, + BACKUPS_USERNAME, + CLUSTER_ADMIN_PASSWORD_KEY, + CLUSTER_ADMIN_USERNAME, + MONITORING_PASSWORD_KEY, + MONITORING_USERNAME, + PEER, + ROOT_PASSWORD_KEY, + ROOT_USERNAME, + SERVER_CONFIG_PASSWORD_KEY, + SERVER_CONFIG_USERNAME, +) from ops import ( ActionEvent, ActiveStatus, @@ -30,21 +43,6 @@ ) from ops.framework import Object from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed -from typing_extensions import Optional - -from constants import ( - BACKUPS_PASSWORD_KEY, - BACKUPS_USERNAME, - CLUSTER_ADMIN_PASSWORD_KEY, - CLUSTER_ADMIN_USERNAME, - MONITORING_PASSWORD_KEY, - MONITORING_USERNAME, - PEER, - ROOT_PASSWORD_KEY, - ROOT_USERNAME, - SERVER_CONFIG_PASSWORD_KEY, - SERVER_CONFIG_USERNAME, -) if typing.TYPE_CHECKING: from charm import MySQLOperatorCharm @@ -54,7 +52,7 @@ # The unique Charmhub library identifier, never change it LIBID = "4de21f1a022c4e2c87ac8e672ec16f6a" LIBAPI = 0 -LIBPATCH = 7 +LIBPATCH = 10 RELATION_OFFER = "replication-offer" RELATION_CONSUMER = "replication" @@ -126,21 +124,21 @@ def cluster_set_name(self) -> str: return self._charm.app_peer_data["cluster-set-domain-name"] @property - def relation(self) -> Optional[Relation]: + def relation(self) -> Relation | None: """Relation.""" return self.model.get_relation(RELATION_OFFER) or self.model.get_relation( RELATION_CONSUMER ) @property - def relation_data(self) -> Optional[RelationDataContent]: + def relation_data(self) -> RelationDataContent | None: """Relation data.""" if not self.relation: return return self.relation.data[self.model.app] @property - def remote_relation_data(self) -> Optional[RelationDataContent]: + def remote_relation_data(self) -> RelationDataContent | None: """Remote relation data.""" if not self.relation or not self.relation.app: return @@ -148,6 +146,9 @@ def 
remote_relation_data(self) -> Optional[RelationDataContent]: def _on_promote_to_primary(self, event: ActionEvent) -> None: """Promote a standby cluster to primary.""" + if event.params.get("scope") != "cluster": + return + if not self._charm.unit.is_leader(): event.fail("Only the leader unit can promote a standby cluster") return @@ -359,7 +360,7 @@ def __init__(self, charm: "MySQLOperatorCharm"): self.framework.observe(self._charm.on.secret_changed, self._on_secret_change) @property - def state(self) -> Optional[States]: + def state(self) -> States | None: """State of the relation, on primary side.""" if not self.relation: return States.UNINITIALIZED @@ -405,12 +406,10 @@ def idle(self) -> bool: # transitional state between relation created and setup_action return False - if self.state not in [States.READY, States.UNINITIALIZED]: - return False - return True + return self.state in [States.READY, States.UNINITIALIZED] @property - def secret(self) -> Optional[Secret]: + def secret(self) -> Secret | None: """Return the async replication secret.""" if not self.relation: return @@ -428,7 +427,7 @@ def _get_secret(self) -> Secret: def _on_create_replication(self, event: ActionEvent): """Promote the offer side to primary on initial setup.""" - if not self._charm.app_peer_data.get("async-ready") == "true": + if self._charm.app_peer_data.get("async-ready") != "true": event.fail("Relation created but not ready") return @@ -489,10 +488,8 @@ def _on_offer_created(self, event: RelationCreatedEvent): ): # Test for a broken relation on the primary side logger.error( - ( - "Cannot setup async relation with primary cluster in blocked/read-only state\n" - "Remove the relation." - ) + "Cannot setup async relation with primary cluster in blocked/read-only state\n" + "Remove the relation." ) message = f"Cluster is in a blocked state. 
Remove {RELATION_OFFER} relation" self._charm.unit.status = BlockedStatus(message) @@ -501,10 +498,8 @@ def _on_offer_created(self, event: RelationCreatedEvent): if not self.model.get_relation(RELATION_OFFER): # safeguard against a deferred event a previous relation. logger.error( - ( - "Relation created running against removed relation.\n" - f"Remove {RELATION_OFFER} relation and retry." - ) + "Relation created running against removed relation.\n" + f"Remove {RELATION_OFFER} relation and retry." ) self._charm.unit.status = BlockedStatus(f"Remove {RELATION_OFFER} relation and retry") return @@ -639,7 +634,7 @@ def __init__(self, charm: "MySQLOperatorCharm"): self.framework.observe(self._charm.on.secret_changed, self._on_secret_change) @property - def state(self) -> Optional[States]: + def state(self) -> States | None: """State of the relation, on consumer side.""" if not self.relation: return None @@ -716,7 +711,7 @@ def _async_replication_credentials(self) -> dict[str, str]: secret = self._obtain_secret() return secret.peek_content() - def _get_endpoint(self) -> Optional[str]: + def _get_endpoint(self) -> str | None: """Get endpoint to be used by the primary cluster. This is the address in which the unit must be reachable from the primary cluster. 
@@ -885,7 +880,7 @@ def _on_consumer_non_leader_created(self, _): # set waiting state to inhibit auto recovery, only when not already set if self._charm.unit.is_leader(): return - if not self._charm.unit_peer_data.get("member-state") == "waiting": + if self._charm.unit_peer_data.get("member-state") != "waiting": self._charm.unit_peer_data["member-state"] = "waiting" self._charm.unit.status = WaitingStatus("waiting replica cluster be configured") diff --git a/lib/charms/mysql/v0/backups.py b/lib/charms/mysql/v0/backups.py index 7ffb87a7b0..b4bfdd15f7 100644 --- a/lib/charms/mysql/v0/backups.py +++ b/lib/charms/mysql/v0/backups.py @@ -50,7 +50,6 @@ def is_unit_blocked(self) -> bool: import pathlib import re import typing -from typing import Dict, List, Optional, Tuple from charms.data_platform_libs.v0.s3 import ( CredentialsChangedEvent, @@ -88,17 +87,16 @@ def is_unit_blocked(self) -> bool: list_backups_in_s3_path, upload_content_to_s3, ) -from ops.charm import ActionEvent -from ops.framework import Object -from ops.jujuversion import JujuVersion -from ops.model import BlockedStatus, MaintenanceStatus - from constants import ( MYSQL_DATA_DIR, PEER, SERVER_CONFIG_PASSWORD_KEY, SERVER_CONFIG_USERNAME, ) +from ops.charm import ActionEvent +from ops.framework import Object +from ops.jujuversion import JujuVersion +from ops.model import BlockedStatus, MaintenanceStatus logger = logging.getLogger(__name__) @@ -113,7 +111,7 @@ def is_unit_blocked(self) -> bool: # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 14 +LIBPATCH = 16 ANOTHER_S3_CLUSTER_REPOSITORY_ERROR_MESSAGE = "S3 repository claimed by another cluster" MOVE_RESTORED_CLUSTER_TO_ANOTHER_S3_REPOSITORY_ERROR = ( @@ -150,7 +148,7 @@ def _s3_integrator_relation_exists(self) -> bool: """Returns whether a relation with the s3-integrator exists.""" return bool(self.model.get_relation(S3_INTEGRATOR_RELATION_NAME)) - def 
_retrieve_s3_parameters(self) -> Tuple[Dict[str, str], List[str]]: + def _retrieve_s3_parameters(self) -> tuple[dict[str, str], list[str]]: """Retrieve S3 parameters from the S3 integrator relation. Returns: tuple of (s3_parameters, missing_required_parameters) @@ -196,7 +194,7 @@ def _upload_logs_to_s3( stdout: str, stderr: str, log_filename: str, - s3_parameters: Dict[str, str], + s3_parameters: dict[str, str], ) -> bool: """Upload logs to S3 at the specified location. @@ -219,7 +217,7 @@ def _upload_logs_to_s3( # ------------------ List Backups ------------------ @staticmethod - def _format_backups_list(backup_list: List[Tuple[str, str]]) -> str: + def _format_backups_list(backup_list: list[tuple[str, str]]) -> str: """Formats the provided list of backups as a table.""" backups = [f"{'backup-id':<21} | {'backup-type':<12} | backup-status"] @@ -250,9 +248,7 @@ def _on_list_backups(self, event: ActionEvent) -> None: event.set_results({"backups": self._format_backups_list(backups)}) except Exception as e: error_message = ( - getattr(e, "message") - if hasattr(e, "message") - else "Failed to retrieve backup ids from S3" + e.message if hasattr(e, "message") else "Failed to retrieve backup ids from S3" ) logger.error(error_message) event.fail(error_message) @@ -313,7 +309,7 @@ def _on_create_backup(self, event: ActionEvent) -> None: f"Model Name: {self.model.name}\n" f"Application Name: {self.model.app.name}\n" f"Unit Name: {self.charm.unit.name}\n" - f"Juju Version: {str(juju_version)}\n" + f"Juju Version: {juju_version!s}\n" ) if not upload_content_to_s3(metadata, f"{backup_path}.metadata", s3_parameters): @@ -359,7 +355,7 @@ def _on_create_backup(self, event: ActionEvent) -> None: }) self.charm._on_update_status(None) - def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]: + def _can_unit_perform_backup(self) -> tuple[bool, str | None]: """Validates whether this unit can perform a backup. 
Returns: tuple of (success, error_message) @@ -390,7 +386,7 @@ def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]: return True, None - def _pre_backup(self) -> Tuple[bool, Optional[str]]: + def _pre_backup(self) -> tuple[bool, str | None]: """Runs operations required before performing a backup. Returns: tuple of (success, error_message) @@ -415,7 +411,7 @@ def _pre_backup(self) -> Tuple[bool, Optional[str]]: return True, None - def _backup(self, backup_path: str, s3_parameters: Dict) -> Tuple[bool, Optional[str]]: + def _backup(self, backup_path: str, s3_parameters: dict) -> tuple[bool, str | None]: """Runs the backup operations. Args: @@ -450,7 +446,7 @@ def _backup(self, backup_path: str, s3_parameters: Dict) -> Tuple[bool, Optional return True, None - def _post_backup(self) -> Tuple[bool, Optional[str]]: + def _post_backup(self) -> tuple[bool, str | None]: """Runs operations required after performing a backup. Returns: tuple of (success, error_message) @@ -613,7 +609,7 @@ def _on_restore(self, event: ActionEvent) -> None: # noqa: C901 # update status as soon as possible self.charm._on_update_status(None) - def _pre_restore(self) -> Tuple[bool, str]: + def _pre_restore(self) -> tuple[bool, str]: """Perform operations that need to be done before performing a restore. Returns: tuple of (success, error_message) @@ -635,7 +631,7 @@ def _pre_restore(self) -> Tuple[bool, str]: return True, "" - def _restore(self, backup_id: str, s3_parameters: Dict[str, str]) -> Tuple[bool, bool, str]: + def _restore(self, backup_id: str, s3_parameters: dict[str, str]) -> tuple[bool, bool, str]: """Run the restore operations. Args: @@ -687,7 +683,7 @@ def _restore(self, backup_id: str, s3_parameters: Dict[str, str]) -> Tuple[bool, return True, True, "" - def _clean_data_dir_and_start_mysqld(self) -> Tuple[bool, str]: + def _clean_data_dir_and_start_mysqld(self) -> tuple[bool, str]: """Run idempotent operations run after restoring a backup. 
Returns tuple of (success, error_message) @@ -711,8 +707,8 @@ def _clean_data_dir_and_start_mysqld(self) -> Tuple[bool, str]: return True, "" def _pitr_restore( - self, restore_to_time: str, s3_parameters: Dict[str, str] - ) -> Tuple[bool, str]: + self, restore_to_time: str, s3_parameters: dict[str, str] + ) -> tuple[bool, str]: try: logger.info("Restoring point-in-time-recovery") stdout, stderr = self.charm._mysql.restore_pitr( @@ -728,7 +724,7 @@ def _pitr_restore( return False, f"Failed to restore point-in-time-recovery to the {restore_to_time}" return True, "" - def _post_restore(self) -> Tuple[bool, str]: + def _post_restore(self) -> tuple[bool, str]: """Run operations required after restoring a backup. Returns: tuple of (success, error_message) @@ -836,7 +832,7 @@ def _on_s3_credentials_gone(self, event: CredentialsGoneEvent) -> None: "Exception is occurred when trying to stop binlogs collecting after S3 relation depart. It may be a leader departure" ) - def get_binlogs_collector_config(self) -> Dict[str, str]: + def get_binlogs_collector_config(self) -> dict[str, str]: """Return binlogs collector service config file. 
Returns: dict of binlogs collector service config diff --git a/lib/charms/mysql/v0/mysql.py b/lib/charms/mysql/v0/mysql.py index 53b2515083..f002baab28 100644 --- a/lib/charms/mysql/v0/mysql.py +++ b/lib/charms/mysql/v0/mysql.py @@ -81,27 +81,12 @@ def wait_until_mysql_connection(self) -> None: from typing import ( TYPE_CHECKING, Any, - Dict, - List, Literal, - Optional, - Tuple, - Union, get_args, ) import ops from charms.data_platform_libs.v0.data_interfaces import DataPeerData, DataPeerUnitData -from ops.charm import ActionEvent, CharmBase, RelationBrokenEvent -from ops.model import Unit -from tenacity import ( - retry, - retry_if_exception_type, - stop_after_attempt, - wait_fixed, - wait_random, -) - from constants import ( BACKUPS_PASSWORD_KEY, BACKUPS_USERNAME, @@ -120,6 +105,15 @@ def wait_until_mysql_connection(self) -> None: SERVER_CONFIG_PASSWORD_KEY, SERVER_CONFIG_USERNAME, ) +from ops.charm import ActionEvent, CharmBase, RelationBrokenEvent +from ops.model import Unit +from tenacity import ( + retry, + retry_if_exception_type, + stop_after_attempt, + wait_fixed, + wait_random, +) from utils import generate_random_password logger = logging.getLogger(__name__) @@ -133,7 +127,7 @@ def wait_until_mysql_connection(self) -> None: # Increment this major API version when introducing breaking changes LIBAPI = 0 -LIBPATCH = 89 +LIBPATCH = 93 UNIT_TEARDOWN_LOCKNAME = "unit-teardown" UNIT_ADD_LOCKNAME = "unit-add" @@ -148,8 +142,9 @@ def wait_until_mysql_connection(self) -> None: MIM_MEM_BUFFERS = 200 * BYTES_1MiB ADMIN_PORT = 33062 -SECRET_INTERNAL_LABEL = "secret-id" -SECRET_DELETED_LABEL = "None" +# Labels are not confidential +SECRET_INTERNAL_LABEL = "secret-id" # noqa: S105 +SECRET_DELETED_LABEL = "None" # noqa: S105 APP_SCOPE = "app" UNIT_SCOPE = "unit" @@ -170,12 +165,12 @@ def __init__(self, message: str = "") -> None: def __repr__(self): """String representation of the Error class.""" - return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, 
self.args) + return f"<{type(self).__module__}.{type(self).__name__} {self.args}>" @property def name(self): """Return a string representation of the model plus class.""" - return "<{}.{}>".format(type(self).__module__, type(self).__name__) + return f"<{type(self).__module__}.{type(self).__name__}>" class MySQLConfigureMySQLUsersError(Error): @@ -230,6 +225,10 @@ class MySQLAddInstanceToClusterError(Error): """Exception raised when there is an issue add an instance to the MySQL InnoDB cluster.""" +class MySQLRejoinInstanceToClusterError(Error): + """Exception raised when there is an issue rejoining an instance to the MySQL InnoDB cluster.""" + + class MySQLRemoveInstanceRetryError(Error): """Exception raised when there is an issue removing an instance. @@ -291,6 +290,10 @@ class MySQLRebootFromCompleteOutageError(Error): """Exception raised when there is an issue rebooting from complete outage.""" +class MySQLForceQuorumFromInstanceError(Error): + """Exception raised when there is an issue forcing quorum from an instance.""" + + class MySQLSetInstanceOfflineModeError(Error): """Exception raised when there is an issue setting instance as offline.""" @@ -476,7 +479,11 @@ def __init__(self, *args): self.framework.observe(self.on.get_cluster_status_action, self._get_cluster_status) self.framework.observe(self.on.get_password_action, self._on_get_password) self.framework.observe(self.on.set_password_action, self._on_set_password) + self.framework.observe(self.on.promote_to_primary_action, self._on_promote_to_primary) self.framework.observe(self.on.recreate_cluster_action, self._recreate_cluster) + self.framework.observe( + self.on[PEER].relation_changed, self.check_topology_timestamp_change + ) # Set in some event handlers in order to avoid passing event down a chain # of methods @@ -582,6 +589,43 @@ def _get_cluster_status(self, event: ActionEvent) -> None: "message": "Failed to read cluster status. 
See logs for more information.", }) + def _on_promote_to_primary(self, event: ActionEvent) -> None: + """Action for setting this unit as the cluster primary.""" + if event.params.get("scope") != "unit": + return + + if self._mysql.is_unit_primary(self.unit_label): + event.set_results({ + "success": False, + "message": "Unit is already primary", + }) + return + + if event.params.get("force"): + # Failover + logger.info("Forcing quorum from instance") + try: + self._mysql.force_quorum_from_instance() + except MySQLForceQuorumFromInstanceError: + logger.exception("Failed to force quorum from instance") + event.fail("Failed to force quorum from instance. See logs for more information.") + else: + # Switchover + logger.info("Setting unit as cluster primary") + try: + self._mysql.set_cluster_primary(self.get_unit_hostname()) + except MySQLSetClusterPrimaryError: + logger.exception("Failed to set cluster primary") + event.fail("Failed to change cluster primary. See logs for more information.") + + # Use peer relation to trigger endpoint update + # refer to mysql_provider.py + self.unit_peer_data.update({"topology-change-timestamp": str(int(time.time()))}) + event.set_results({ + "success": True, + "message": "Unit is already primary", + }) + def _recreate_cluster(self, event: ActionEvent) -> None: """Action used to recreate the cluster, for special cases.""" if not self.unit.is_leader(): @@ -623,8 +667,39 @@ def create_cluster(self) -> None: self.unit_peer_data.update({"member-state": state, "member-role": role}) + @abstractmethod + def update_endpoints(self) -> None: + """Update the endpoints for the cluster.""" + raise NotImplementedError + + def check_topology_timestamp_change(self, _) -> None: + """Check for cluster topology changes and trigger endpoint update if needed. + + Used for trigger endpoint updates for non typical events like, add/remove unit + or update status. 
+ """ + topology_change_set = { + int(self.peers.data[unit]["topology-change-timestamp"]) + for unit in self.peers.units + if self.peers.data[unit].get("topology-change-timestamp") + } + if not topology_change_set: + # no topology change detected + return + topology_change = int(self.unit_peer_data.get("topology-change-timestamp", "0")) + max_topology_change = max(topology_change_set) + if self.unit.is_leader() and max_topology_change > topology_change: + # update endpoints required + self.update_endpoints() + return + + # sync timestamp and trigger relation changed + self.unit_peer_data.update({ + "topology-change-timestamp": str(max(max_topology_change, topology_change)) + }) + @property - def peers(self) -> Optional[ops.model.Relation]: + def peers(self) -> ops.model.Relation | None: """Retrieve the peer relation.""" return self.model.get_relation(PEER) @@ -636,11 +711,10 @@ def cluster_initialized(self) -> bool: for unit in self.app_units: try: - if unit != self.unit and self._mysql.cluster_metadata_exists( - self.get_unit_address(unit, PEER) - ): - return True - elif self._mysql.cluster_metadata_exists(): + if ( + unit != self.unit + and self._mysql.cluster_metadata_exists(self.get_unit_address(unit, PEER)) + ) or self._mysql.cluster_metadata_exists(): return True except MySQLClusterMetadataExistsError: pass @@ -648,7 +722,7 @@ def cluster_initialized(self) -> bool: return False @property - def only_one_cluster_node_thats_uninitialized(self) -> Optional[bool]: + def only_one_cluster_node_thats_uninitialized(self) -> bool | None: """Check if only a single cluster node exists across all units.""" if not self.app_peer_data.get("cluster-name"): return None @@ -687,7 +761,7 @@ def unit_configured(self) -> bool: ) @property - def app_peer_data(self) -> Union[ops.RelationDataContent, dict]: + def app_peer_data(self) -> ops.RelationDataContent | dict: """Application peer relation data object.""" if self.peers is None: return {} @@ -695,7 +769,7 @@ def 
app_peer_data(self) -> Union[ops.RelationDataContent, dict]: return self.peers.data[self.app] @property - def unit_peer_data(self) -> Union[ops.RelationDataContent, dict]: + def unit_peer_data(self) -> ops.RelationDataContent | dict: """Unit peer relation data object.""" if self.peers is None: return {} @@ -780,7 +854,7 @@ def peer_relation_data(self, scope: Scopes) -> DataPeerData: elif scope == UNIT_SCOPE: return self.peer_relation_unit - def get_cluster_endpoints(self, relation_name: str) -> Tuple[str, str, str]: + def get_cluster_endpoints(self, relation_name: str) -> tuple[str, str, str]: """Return (rw, ro, offline) endpoints tuple names or IPs.""" repl_topology = self._mysql.get_cluster_topology() repl_cluster = self._mysql.is_cluster_replica() @@ -800,7 +874,11 @@ def get_cluster_endpoints(self, relation_name: str) -> Tuple[str, str, str]: if v["status"] == MySQLMemberState.RECOVERING: continue - address = f"{self.get_unit_address(unit_labels[k], relation_name)}:3306" + # skip if unit not available in unit_labels + if unit_label := unit_labels.get(k): + address = f"{self.get_unit_address(unit_label, relation_name)}:3306" + else: + continue if v["status"] != MySQLMemberState.ONLINE: no_endpoints.add(address) @@ -820,7 +898,7 @@ def get_secret( self, scope: Scopes, key: str, - ) -> Optional[str]: + ) -> str | None: """Get secret from the secret storage. Retrieve secret from juju secrets backend if secret exists there. @@ -836,14 +914,16 @@ def get_secret( # NOTE: here we purposefully search both in secrets and in databag by using # the fetch_my_relation_field instead of peer_relation_data(scope).get_secrets(). 
- if not (value := self.peer_relation_data(scope).fetch_my_relation_field(peers.id, key)): - if key in SECRET_KEY_FALLBACKS: - value = self.peer_relation_data(scope).fetch_my_relation_field( - peers.id, SECRET_KEY_FALLBACKS[key] - ) + if ( + not (value := self.peer_relation_data(scope).fetch_my_relation_field(peers.id, key)) + and key in SECRET_KEY_FALLBACKS + ): + value = self.peer_relation_data(scope).fetch_my_relation_field( + peers.id, SECRET_KEY_FALLBACKS[key] + ) return value - def set_secret(self, scope: Scopes, key: str, value: Optional[str]) -> None: + def set_secret(self, scope: Scopes, key: str, value: str | None) -> None: """Set a secret in the secret storage.""" if scope not in get_args(Scopes): raise MySQLSecretError(f"Invalid secret {scope=}") @@ -889,7 +969,8 @@ def generate_random_hash() -> str: A hash based on a random string. """ random_characters = generate_random_password(10) - return hashlib.md5(random_characters.encode("utf-8")).hexdigest() + # TODO Should we be using md5 here? + return hashlib.md5(random_characters.encode("utf-8")).hexdigest() # noqa: S324 class MySQLMemberState(str, enum.Enum): @@ -972,7 +1053,7 @@ def __init__( self.backups_password, ] - def instance_def(self, user: str, host: Optional[str] = None) -> str: + def instance_def(self, user: str, host: str | None = None) -> str: """Return instance definition used on mysqlsh. 
Args: @@ -997,8 +1078,8 @@ def render_mysqld_configuration( # noqa: C901 audit_log_enabled: bool, audit_log_strategy: str, audit_log_policy: str, - memory_limit: Optional[int] = None, - experimental_max_connections: Optional[int] = None, + memory_limit: int | None = None, + experimental_max_connections: int | None = None, binlog_retention_days: int, snap_common: str = "", ) -> tuple[str, dict]: @@ -1055,8 +1136,9 @@ def render_mysqld_configuration( # noqa: C901 # do not enable slow query logs, but specify a log file path in case # the admin enables them manually config["mysqld"] = { - "bind-address": "0.0.0.0", - "mysqlx-bind-address": "0.0.0.0", + # All interfaces bind expected + "bind-address": "0.0.0.0", # noqa: S104 + "mysqlx-bind-address": "0.0.0.0", # noqa: S104 "admin_address": self.instance_address, "report_host": self.instance_address, "max_connections": str(max_connections), @@ -1138,9 +1220,9 @@ def configure_mysql_users(self) -> None: configure_users_commands, password=self.root_password, ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to configure users for: {self.instance_address}") - raise MySQLConfigureMySQLUsersError + raise MySQLConfigureMySQLUsersError from e def _plugin_file_exists(self, plugin_file_name: str) -> bool: """Check if the plugin file exists. 
@@ -1197,9 +1279,9 @@ def install_plugins(self, plugins: list[str]) -> None: user=self.server_config_user, password=self.server_config_password, ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to install {plugin=}") # type: ignore - raise MySQLPluginInstallError + raise MySQLPluginInstallError from e except MySQLGetVariableError: # workaround for config changed triggered after failed upgrade # the check fails for charms revisions not using admin address @@ -1231,11 +1313,11 @@ def uninstall_plugins(self, plugins: list[str]) -> None: user=self.server_config_user, password=self.server_config_password, ) - except MySQLClientError: + except MySQLClientError as e: logger.error( f"Failed to uninstall {plugin=}", # type: ignore ) - raise MySQLPluginInstallError + raise MySQLPluginInstallError from e def _get_installed_plugins(self) -> set[str]: """Return a set of explicitly installed plugins.""" @@ -1254,7 +1336,8 @@ def _get_installed_plugins(self) -> set[str]: def does_mysql_user_exist(self, username: str, hostname: str) -> bool: """Checks if a mysql user already exists.""" user_existence_commands = ( - f"select user from mysql.user where user = '{username}' and host = '{hostname}'", + # Vars generated by the charm + f"select user from mysql.user where user = '{username}' and host = '{hostname}'", # noqa: S608 ) try: @@ -1264,9 +1347,9 @@ def does_mysql_user_exist(self, username: str, hostname: str) -> bool: password=self.server_config_password, ) return len(output) == 1 - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to check for existence of mysql user {username}@{hostname}") - raise MySQLCheckUserExistenceError() + raise MySQLCheckUserExistenceError() from e def configure_mysqlrouter_user( self, username: str, password: str, hostname: str, unit_name: str @@ -1307,9 +1390,9 @@ def configure_mysqlrouter_user( password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - 
except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to configure mysqlrouter {username=}") - raise MySQLConfigureRouterUserError + raise MySQLConfigureRouterUserError from e def create_application_database_and_scoped_user( self, @@ -1318,7 +1401,7 @@ def create_application_database_and_scoped_user( password: str, hostname: str, *, - unit_name: Optional[str] = None, + unit_name: str | None = None, create_database: bool = True, ) -> None: """Create an application database and a user scoped to the created database.""" @@ -1355,7 +1438,7 @@ def create_application_database_and_scoped_user( logger.error( f"Failed to create application database {database_name} and scoped user {username}@{hostname}" ) - raise MySQLCreateApplicationDatabaseAndScopedUserError(e.message) + raise MySQLCreateApplicationDatabaseAndScopedUserError(e.message) from e @staticmethod def _get_statements_to_delete_users_with_attribute( @@ -1366,9 +1449,10 @@ def _get_statements_to_delete_users_with_attribute( If the value of the attribute is a string, include single quotes in the string. (e.g. 
"'bar'") """ + # Variables come from the charm return [ ( - "session.run_sql(\"SELECT IFNULL(CONCAT('DROP USER ', GROUP_CONCAT(QUOTE(USER)," + "session.run_sql(\"SELECT IFNULL(CONCAT('DROP USER ', GROUP_CONCAT(QUOTE(USER)," # noqa: S608 " '@', QUOTE(HOST))), 'SELECT 1') INTO @sql FROM INFORMATION_SCHEMA.USER_ATTRIBUTES" f" WHERE ATTRIBUTE->'$.{attribute_name}'={attribute_value}\")" ), @@ -1382,9 +1466,10 @@ def get_mysql_router_users_for_unit( ) -> list[RouterUser]: """Get users for related MySQL Router unit.""" relation_user = f"relation-{relation_id}" + # Variables come from the charm command = [ ( - "result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM " + "result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM " # noqa: S608 f"INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.created_by_user'='{relation_user}' " f"AND ATTRIBUTE->'$.created_by_juju_unit'='{mysql_router_unit_name}'\")" ), @@ -1401,7 +1486,7 @@ def get_mysql_router_users_for_unit( logger.error( f"Failed to get MySQL Router users for relation {relation_id} and unit {mysql_router_unit_name}" ) - raise MySQLGetRouterUsersError(e.message) + raise MySQLGetRouterUsersError(e.message) from e rows = json.loads(output) return [RouterUser(username=row[0], router_id=row[1]) for row in rows] @@ -1422,7 +1507,7 @@ def delete_users_for_unit(self, unit_name: str) -> None: ) except MySQLClientError as e: logger.error(f"Failed to query and delete users for unit {unit_name}") - raise MySQLDeleteUsersForUnitError(e.message) + raise MySQLDeleteUsersForUnitError(e.message) from e def delete_users_for_relation(self, username: str) -> None: """Delete users for a relation.""" @@ -1444,7 +1529,7 @@ def delete_users_for_relation(self, username: str) -> None: ) except MySQLClientError as e: logger.error(f"Failed to delete {username=}") - raise MySQLDeleteUsersForRelationError(e.message) + raise MySQLDeleteUsersForRelationError(e.message) from e def delete_user(self, username: str) -> 
None: """Delete user.""" @@ -1461,7 +1546,7 @@ def delete_user(self, username: str) -> None: ) except MySQLClientError as e: logger.error(f"Failed to delete user {username}") - raise MySQLDeleteUserError(e.message) + raise MySQLDeleteUserError(e.message) from e def remove_router_from_cluster_metadata(self, router_id: str) -> None: """Remove MySQL Router from InnoDB Cluster metadata.""" @@ -1478,14 +1563,14 @@ def remove_router_from_cluster_metadata(self, router_id: str) -> None: ) except MySQLClientError as e: logger.error(f"Failed to remove router from metadata with ID {router_id}") - raise MySQLRemoveRouterFromMetadataError(e.message) + raise MySQLRemoveRouterFromMetadataError(e.message) from e def set_dynamic_variable( self, variable: str, value: str, persist: bool = False, - instance_address: Optional[str] = None, + instance_address: str | None = None, ) -> None: """Set a dynamic variable value for the instance.""" # escape variable values when needed @@ -1504,9 +1589,9 @@ def set_dynamic_variable( password=self.server_config_password, host=self.instance_def(self.server_config_user, instance_address), ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to set {variable=} to {value=}") - raise MySQLSetVariableError + raise MySQLSetVariableError from e def get_variable_value(self, variable: str) -> str: """Get the value of a variable.""" @@ -1522,9 +1607,9 @@ def get_variable_value(self, variable: str) -> str: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to get value for {variable=}") - raise MySQLGetVariableError + raise MySQLGetVariableError from e rows = json.loads(output) return rows[0][1] @@ -1556,9 +1641,9 @@ def configure_instance(self, create_cluster_admin: bool = True) -> None: host=self.instance_def(self.server_config_user), ) self.wait_until_mysql_connection() - except MySQLClientError: + except 
MySQLClientError as e: logger.error(f"Failed to configure instance {self.instance_address}") - raise MySQLConfigureInstanceError + raise MySQLConfigureInstanceError from e def create_cluster(self, unit_label: str) -> None: """Create an InnoDB cluster with Group Replication enabled.""" @@ -1581,9 +1666,9 @@ def create_cluster(self, unit_label: str) -> None: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to create cluster on instance: {self.instance_address}") - raise MySQLCreateClusterError + raise MySQLCreateClusterError from e def create_cluster_set(self) -> None: """Create a cluster set for the cluster on cluster primary.""" @@ -1610,8 +1695,8 @@ def create_replica_cluster( endpoint: str, replica_cluster_name: str, instance_label: str, - donor: Optional[str] = None, - method: Optional[str] = "auto", + donor: str | None = None, + method: str | None = "auto", ) -> None: """Create a replica cluster from the primary cluster.""" options = { @@ -1643,7 +1728,7 @@ def create_replica_cluster( host=self.instance_def(self.server_config_user), exception_as_warning=log_exception, ) - except MySQLClientError: + except MySQLClientError as e: if method == "auto": logger.warning( "Failed to create replica cluster with auto method, fallback to clone method" @@ -1657,7 +1742,7 @@ def create_replica_cluster( ) else: logger.error("Failed to create replica cluster") - raise MySQLCreateReplicaClusterError + raise MySQLCreateReplicaClusterError from e def promote_cluster_to_primary(self, cluster_name: str, force: bool = False) -> None: """Promote a cluster to become the primary cluster on the cluster set.""" @@ -1683,9 +1768,9 @@ def promote_cluster_to_primary(self, cluster_name: str, force: bool = False) -> password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: 
logger.error("Failed to promote cluster to primary") - raise MySQLPromoteClusterToPrimaryError + raise MySQLPromoteClusterToPrimaryError from e def fence_writes(self) -> None: """Fence writes on the primary cluster.""" @@ -1701,9 +1786,9 @@ def fence_writes(self) -> None: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to fence writes on cluster") - raise MySQLFencingWritesError + raise MySQLFencingWritesError from e def unfence_writes(self) -> None: """Unfence writes on the primary cluster and reset read_only flag.""" @@ -1720,11 +1805,11 @@ def unfence_writes(self) -> None: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to resume writes on primary cluster") - raise MySQLFencingWritesError + raise MySQLFencingWritesError from e - def is_cluster_writes_fenced(self) -> Optional[bool]: + def is_cluster_writes_fenced(self) -> bool | None: """Check if the cluster is fenced against writes.""" status = self.get_cluster_status() if not status: @@ -1732,7 +1817,7 @@ def is_cluster_writes_fenced(self) -> Optional[bool]: return status["defaultreplicaset"]["status"] == MySQLClusterState.FENCED - def is_cluster_in_cluster_set(self, cluster_name: str) -> Optional[bool]: + def is_cluster_in_cluster_set(self, cluster_name: str) -> bool | None: """Check if a cluster is in the cluster set.""" cs_status = self.get_cluster_set_status(extended=0) @@ -1741,7 +1826,7 @@ def is_cluster_in_cluster_set(self, cluster_name: str) -> Optional[bool]: return cluster_name in cs_status["clusters"] - def cluster_metadata_exists(self, from_instance: Optional[str] = None) -> bool: + def cluster_metadata_exists(self, from_instance: str | None = None) -> bool: """Check if this cluster metadata exists on database. 
Use mysqlsh when querying clusters from remote instances. However, use @@ -1774,11 +1859,11 @@ def cluster_metadata_exists(self, from_instance: Optional[str] = None) -> bool: timeout=60, exception_as_warning=True, ) - except MySQLClientError: + except MySQLClientError as e: logger.warning(f"Failed to check if cluster metadata exists {from_instance=}") raise MySQLClusterMetadataExistsError( f"Failed to check if cluster metadata exists {from_instance=}" - ) + ) from e return self.cluster_name in output @@ -1791,9 +1876,11 @@ def cluster_metadata_exists(self, from_instance: Optional[str] = None) -> bool: exception_as_warning=True, log_errors=False, ) - except MySQLClientError: + except MySQLClientError as e: logger.warning("Failed to check if local cluster metadata exists") - raise MySQLClusterMetadataExistsError("Failed to check if cluster metadata exists") + raise MySQLClusterMetadataExistsError( + "Failed to check if cluster metadata exists" + ) from e cluster_names = [entry[0].strip() for entry in output] return self.cluster_name in cluster_names @@ -1816,9 +1903,9 @@ def rejoin_cluster(self, cluster_name) -> None: ) logger.info(f"Rejoined {cluster_name=}") - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to rejoin cluster") - raise MySQLRejoinClusterError + raise MySQLRejoinClusterError from e def remove_replica_cluster(self, replica_cluster_name: str, force: bool = False) -> None: """Remove a replica cluster from the cluster-set.""" @@ -1839,19 +1926,20 @@ def remove_replica_cluster(self, replica_cluster_name: str, force: bool = False) password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to remove replica cluster") - raise MySQLRemoveReplicaClusterError + raise MySQLRemoveReplicaClusterError from e def initialize_juju_units_operations_table(self) -> None: """Initialize the mysql.juju_units_operations table using the 
serverconfig user.""" + # Variables come from the charm initialize_table_commands = ( "DROP TABLE IF EXISTS mysql.juju_units_operations", "CREATE TABLE mysql.juju_units_operations (task varchar(20), executor " "varchar(20), status varchar(20), primary key(task))", - f"INSERT INTO mysql.juju_units_operations values ('{UNIT_TEARDOWN_LOCKNAME}', '', " + f"INSERT INTO mysql.juju_units_operations values ('{UNIT_TEARDOWN_LOCKNAME}', '', " # noqa: S608 "'not-started') ON DUPLICATE KEY UPDATE executor = '', status = 'not-started'", - f"INSERT INTO mysql.juju_units_operations values ('{UNIT_ADD_LOCKNAME}', '', " + f"INSERT INTO mysql.juju_units_operations values ('{UNIT_ADD_LOCKNAME}', '', " # noqa: S608 "'not-started') ON DUPLICATE KEY UPDATE executor = '', status = 'not-started'", ) @@ -1865,17 +1953,17 @@ def initialize_juju_units_operations_table(self) -> None: user=self.server_config_user, password=self.server_config_password, ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to initialize mysql.juju_units_operations table with error") - raise MySQLInitializeJujuOperationsTableError + raise MySQLInitializeJujuOperationsTableError from e def add_instance_to_cluster( self, *, instance_address: str, instance_unit_label: str, - from_instance: Optional[str] = None, - lock_instance: Optional[str] = None, + from_instance: str | None = None, + lock_instance: str | None = None, method: str = "auto", ) -> None: """Add an instance to the InnoDB cluster.""" @@ -1920,12 +2008,12 @@ def add_instance_to_cluster( exception_as_warning=log_exception, ) - except MySQLClientError: + except MySQLClientError as e: if method == "clone": logger.error( f"Failed to add {instance_address=} to {self.cluster_name=} on {self.instance_address=}", ) - raise MySQLAddInstanceToClusterError + raise MySQLAddInstanceToClusterError from e logger.debug( f"Cannot add {instance_address=} to {self.cluster_name=} with recovery {method=}. 
Trying method 'clone'" @@ -1941,6 +2029,48 @@ def add_instance_to_cluster( # always release the lock self._release_lock(local_lock_instance, instance_unit_label, UNIT_ADD_LOCKNAME) + def rejoin_instance_to_cluster( + self, *, unit_address: str, unit_label: str, from_instance: str + ) -> None: + """Rejoin an instance to the InnoDB cluster. + + Args: + unit_address: The address of the unit to rejoin. + unit_label: The label of the unit to rejoin. + from_instance: The instance from which to rejoin the cluster. + """ + options = {"password": self.server_config_password} + commands = ( + f"cluster = dba.get_cluster('{self.cluster_name}')", + f"cluster.rejoin_instance('{self.instance_def(self.server_config_user, unit_address)}'," + f"{options})", + ) + + from_instance = from_instance or self.instance_address + if not self._acquire_lock( + from_instance, + unit_label, + UNIT_ADD_LOCKNAME, + ): + raise MySQLLockAcquisitionError("Lock not acquired") + + try: + logger.debug(f"Rejoining instance {unit_address} to cluster {self.cluster_name}") + self._run_mysqlsh_script( + "\n".join(commands), + user=self.server_config_user, + password=self.server_config_password, + host=self.instance_def(self.server_config_user, from_instance), + ) + except MySQLClientError as e: + logger.error( + f"Failed to rejoin instance {unit_address} to cluster {self.cluster_name}" + ) + raise MySQLRejoinInstanceToClusterError from e + finally: + # always release the lock + self._release_lock(from_instance, unit_label, UNIT_ADD_LOCKNAME) + def is_instance_configured_for_innodb( self, instance_address: str, instance_unit_label: str ) -> bool: @@ -1983,7 +2113,7 @@ def drop_group_replication_metadata_schema(self) -> None: except MySQLClientError: logger.error("Failed to drop group replication metadata schema") - def are_locks_acquired(self, from_instance: Optional[str] = None) -> bool: + def are_locks_acquired(self, from_instance: str | None = None) -> bool: """Report if any topology change is being 
executed.""" commands = ( "result = session.run_sql(\"SELECT COUNT(*) FROM mysql.juju_units_operations WHERE status='in-progress';\")", @@ -2007,7 +2137,7 @@ def are_locks_acquired(self, from_instance: Optional[str] = None) -> bool: def rescan_cluster( self, - from_instance: Optional[str] = None, + from_instance: str | None = None, remove_instances: bool = False, add_instances: bool = False, ) -> None: @@ -2032,7 +2162,7 @@ def rescan_cluster( ) except MySQLClientError as e: logger.error("Error rescanning the cluster") - raise MySQLRescanClusterError(e.message) + raise MySQLRescanClusterError(e.message) from e def is_instance_in_cluster(self, unit_label: str) -> bool: """Confirm if instance is in the cluster.""" @@ -2069,14 +2199,40 @@ def is_instance_in_cluster(self, unit_label: str) -> bool: ) return False + def instance_belongs_to_cluster(self, unit_label: str) -> bool: + """Check if instance belongs to cluster independently of current state. + + Args: + unit_label: The label of the unit to check. + """ + # Variables come from the charm + query = ( + "SELECT instance_id FROM mysql_innodb_cluster_metadata.instances WHERE cluster_id =" # noqa: S608 + "(SELECT cluster_id FROM mysql_innodb_cluster_metadata.clusters WHERE cluster_name =" + f" '{self.cluster_name}') AND instance_name = '{unit_label}';", + ) + + try: + output = self._run_mysqlcli_script( + query, + user=self.server_config_user, + password=self.server_config_password, + ) + except MySQLClientError: + logger.debug( + "Instance has no cluster metadata, assuming it does not belong to any cluster." 
+ ) + return False + return len(output) == 1 + @retry( wait=wait_fixed(2), stop=stop_after_attempt(3), retry=retry_if_exception_type(TimeoutError), ) def get_cluster_status( - self, from_instance: Optional[str] = None, extended: Optional[bool] = False - ) -> Optional[dict]: + self, from_instance: str | None = None, extended: bool | None = False + ) -> dict | None: """Get the cluster status dictionary.""" options = {"extended": extended} status_commands = ( @@ -2098,8 +2254,8 @@ def get_cluster_status( logger.error(f"Failed to get cluster status for {self.cluster_name}") def get_cluster_set_status( - self, extended: Optional[int] = 1, from_instance: Optional[str] = None - ) -> Optional[dict]: + self, from_instance: str | None = None, extended: int | None = 1 + ) -> dict | None: """Get the cluster-set status dictionary.""" options = {"extended": extended} status_commands = ( @@ -2128,7 +2284,7 @@ def get_cluster_names(self) -> set[str]: return set() return set(status["clusters"]) - def get_replica_cluster_status(self, replica_cluster_name: Optional[str] = None) -> str: + def get_replica_cluster_status(self, replica_cluster_name: str | None = None) -> str: """Get the replica cluster status.""" if not replica_cluster_name: replica_cluster_name = self.cluster_name @@ -2153,15 +2309,16 @@ def get_replica_cluster_status(self, replica_cluster_name: Optional[str] = None) def get_cluster_node_count( self, - from_instance: Optional[str] = None, - node_status: Optional[MySQLMemberState] = None, + from_instance: str | None = None, + node_status: MySQLMemberState | None = None, ) -> int: """Retrieve current count of cluster nodes, optionally filtered by status.""" if not node_status: query = "SELECT COUNT(*) FROM performance_schema.replication_group_members" else: + # Variable comes from the charm query = ( - "SELECT COUNT(*) FROM performance_schema.replication_group_members" + "SELECT COUNT(*) FROM performance_schema.replication_group_members" # noqa: S608 f" WHERE member_state 
= '{node_status.value.upper()}'" ) size_commands = ( @@ -2187,7 +2344,7 @@ def get_cluster_node_count( return int(matches.group(1)) if matches else 0 def execute_remove_instance( - self, connect_instance: Optional[str] = None, force: bool = False + self, connect_instance: str | None = None, force: bool = False ) -> None: """Execute the remove_instance() script with mysqlsh. @@ -2218,7 +2375,10 @@ def execute_remove_instance( wait=wait_random(min=4, max=30), ) def remove_instance( # noqa: C901 - self, unit_label: str, lock_instance: Optional[str] = None + self, + unit_label: str, + lock_instance: str | None = None, + auto_dissolve: bool | None = True, ) -> None: """Remove instance from the cluster. @@ -2226,6 +2386,12 @@ def remove_instance( # noqa: C901 locks on the cluster primary. There is a retry mechanism for any issues obtaining the lock, removing instances/dissolving the cluster, or releasing the lock. + + Args: + unit_label: The label of the unit to remove. + lock_instance: (optional) The instance address to acquire the lock on. + auto_dissolve: (optional) Whether to automatically dissolve the cluster + if this is the last instance in the cluster. 
""" remaining_cluster_member_addresses = [] skip_release_lock = False @@ -2263,7 +2429,8 @@ def remove_instance( # noqa: C901 self.remove_replica_cluster(self.cluster_name) else: skip_release_lock = True - self.dissolve_cluster() + if auto_dissolve: + self.dissolve_cluster() else: # Get remaining cluster member addresses before calling mysqlsh.remove_instance() @@ -2282,39 +2449,37 @@ def remove_instance( # noqa: C901 logger.warning( f"Failed to acquire lock and remove instance {self.instance_address} with error {e.message}" ) - raise MySQLRemoveInstanceRetryError(e.message) + raise MySQLRemoveInstanceRetryError(e.message) from e finally: # There is no need to release the lock if single cluster was dissolved - if skip_release_lock: - return - - try: - if not lock_instance: - if len(remaining_cluster_member_addresses) == 0: - raise MySQLRemoveInstanceRetryError( - "No remaining instance to query cluster primary from." - ) - - # Retrieve the cluster primary's address again (in case the old primary is - # scaled down) - # Release the lock by making a request to this primary member's address - lock_instance = self.get_cluster_primary_address( - connect_instance_address=remaining_cluster_member_addresses[0] - ) + if not skip_release_lock: + try: if not lock_instance: - raise MySQLRemoveInstanceError( - "Unable to retrieve the address of the cluster primary" + if len(remaining_cluster_member_addresses) == 0: + raise MySQLRemoveInstanceRetryError( + "No remaining instance to query cluster primary from." 
+ ) + + # Retrieve the cluster primary's address again (in case the old primary is + # scaled down) + # Release the lock by making a request to this primary member's address + lock_instance = self.get_cluster_primary_address( + connect_instance_address=remaining_cluster_member_addresses[0] ) + if not lock_instance: + raise MySQLRemoveInstanceError( + "Unable to retrieve the address of the cluster primary" + ) - self._release_lock(lock_instance, unit_label, UNIT_TEARDOWN_LOCKNAME) - except MySQLClientError as e: - # Raise an error that does not lead to a retry of this method - logger.error(f"Failed to release lock on {unit_label}") - raise MySQLRemoveInstanceError(e.message) + self._release_lock(lock_instance, unit_label, UNIT_TEARDOWN_LOCKNAME) + except MySQLClientError as e: + # Raise an error that does not lead to a retry of this method + logger.error(f"Failed to release lock on {unit_label}") + raise MySQLRemoveInstanceError(e.message) from e def dissolve_cluster(self) -> None: """Dissolve the cluster independently of the unit teardown process.""" - logger.debug(f"Dissolving cluster {self.cluster_name}") + logger.info(f"Dissolving cluster {self.cluster_name}") dissolve_cluster_commands = ( f"cluster = dba.get_cluster('{self.cluster_name}')", "cluster.dissolve({'force': 'true'})", @@ -2332,13 +2497,14 @@ def _acquire_lock(self, primary_address: str, unit_label: str, lock_name: str) - f"Attempting to acquire lock {lock_name} on {primary_address} for unit {unit_label}" ) + # Variables generated from the charm acquire_lock_commands = ( ( - f"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='{unit_label}'," + f"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='{unit_label}'," # noqa: S608 f" status='in-progress' WHERE task='{lock_name}' AND executor='';\")" ), ( - 'acquired_lock = session.run_sql("SELECT count(*) FROM mysql.juju_units_operations' + 'acquired_lock = session.run_sql("SELECT count(*) FROM mysql.juju_units_operations' # 
noqa: S608 f" WHERE task='{lock_name}' AND executor='{unit_label}';\").fetch_one()[0]" ), "print(f'{acquired_lock}')", @@ -2364,8 +2530,9 @@ def _release_lock(self, primary_address: str, unit_label: str, lock_name: str) - """Releases a lock in the mysql.juju_units_operations table.""" logger.debug(f"Releasing {lock_name=} @{primary_address=} for {unit_label=}") + # Variables generated by the charm release_lock_commands = ( - "r = session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started'" + "r = session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started'" # noqa: S608 f" WHERE task='{lock_name}' AND executor='{unit_label}';\")", "print(r.get_affected_items_count())", ) @@ -2381,7 +2548,7 @@ def _release_lock(self, primary_address: str, unit_label: str, lock_name: str) - else: logger.debug(f"{lock_name=} released for {unit_label=}") - def _get_cluster_member_addresses(self, exclude_unit_labels: List = []) -> Tuple[List, bool]: + def _get_cluster_member_addresses(self, exclude_unit_labels: list) -> tuple[list, bool]: """Get the addresses of the cluster's members.""" logger.debug(f"Getting cluster member addresses, excluding units {exclude_unit_labels}") @@ -2413,8 +2580,8 @@ def _get_cluster_member_addresses(self, exclude_unit_labels: List = []) -> Tuple return (member_addresses, "" in output) def get_cluster_primary_address( - self, connect_instance_address: Optional[str] = None - ) -> Optional[str]: + self, connect_instance_address: str | None = None + ) -> str | None: """Get the cluster primary's address.""" logger.debug("Getting cluster primary member's address") @@ -2433,7 +2600,7 @@ def get_cluster_primary_address( ) except MySQLClientError as e: logger.warning("Failed to get cluster primary addresses") - raise MySQLGetClusterPrimaryAddressError(e.message) + raise MySQLGetClusterPrimaryAddressError(e.message) from e matches = re.search(r"(.+)", output) if not matches: @@ -2442,8 +2609,8 @@ def 
get_cluster_primary_address( return matches.group(1) def get_cluster_set_global_primary_address( - self, connect_instance_address: Optional[str] = None - ) -> Optional[str]: + self, connect_instance_address: str | None = None + ) -> str | None: """Get the cluster set global primary's address.""" logger.debug("Getting cluster set global primary member's address") @@ -2462,7 +2629,7 @@ def get_cluster_set_global_primary_address( ) except MySQLClientError as e: logger.warning("Failed to get cluster set global primary addresses") - raise MySQLGetClusterPrimaryAddressError(e.message) + raise MySQLGetClusterPrimaryAddressError(e.message) from e matches = re.search(r"(.+)", output) if not matches: @@ -2475,7 +2642,7 @@ def get_cluster_set_global_primary_address( return address - def get_cluster_topology(self) -> Optional[dict]: + def get_cluster_topology(self) -> dict | None: """Get the cluster topology.""" status = self.get_cluster_status() if not status: @@ -2483,7 +2650,7 @@ def get_cluster_topology(self) -> Optional[dict]: return status["defaultreplicaset"]["topology"] - def get_primary_label(self) -> Optional[str]: + def get_primary_label(self) -> str | None: """Get the label of the cluster's primary.""" topology = self.get_cluster_topology() if not topology: @@ -2516,9 +2683,9 @@ def set_cluster_primary(self, new_primary_address: str) -> None: ) except MySQLClientError as e: logger.error("Failed to set cluster primary") - raise MySQLSetClusterPrimaryError(e.message) + raise MySQLSetClusterPrimaryError(e.message) from e - def verify_server_upgradable(self, instance: Optional[str] = None) -> None: + def verify_server_upgradable(self, instance: str | None = None) -> None: """Wrapper for API check_for_server_upgrade.""" # use cluster admin user to enforce standard port usage check_command = [ @@ -2551,10 +2718,10 @@ def _strip_output(output: str): if result["errorCount"] == 0: return raise MySQLServerNotUpgradableError(result.get("summary")) - except MySQLClientError: - 
raise MySQLServerNotUpgradableError("Failed to check for server upgrade") + except MySQLClientError as e: + raise MySQLServerNotUpgradableError("Failed to check for server upgrade") from e - def get_mysql_version(self) -> Optional[str]: + def get_mysql_version(self) -> str | None: """Get the running mysqld version.""" logger.debug("Getting InnoDB version") @@ -2572,7 +2739,7 @@ def get_mysql_version(self) -> Optional[str]: ) except MySQLClientError as e: logger.warning("Failed to get workload version") - raise MySQLGetMySQLVersionError(e.message) + raise MySQLGetMySQLVersionError(e.message) from e matches = re.search(r"(.+)", output) @@ -2602,7 +2769,7 @@ def grant_privileges_to_user( ) except MySQLClientError as e: logger.warning(f"Failed to grant privileges to user {username}@{hostname}") - raise MySQLGrantPrivilegesToUserError(e.message) + raise MySQLGrantPrivilegesToUserError(e.message) from e def update_user_password(self, username: str, new_password: str, host: str = "%") -> None: """Updates user password in MySQL database.""" @@ -2623,12 +2790,12 @@ def update_user_password(self, username: str, new_password: str, host: str = "%" password=self.server_config_password, host=self.instance_def(self.server_config_user, instance_address), ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to update user password for user {username}") - raise MySQLCheckUserExistenceError + raise MySQLCheckUserExistenceError from e @retry(reraise=True, stop=stop_after_attempt(3), wait=wait_fixed(GET_MEMBER_STATE_TIME)) - def get_member_state(self) -> Tuple[str, str]: + def get_member_state(self) -> tuple[str, str]: """Get member status (MEMBER_STATE, MEMBER_ROLE) in the cluster.""" member_state_query = ( "SELECT MEMBER_STATE, MEMBER_ROLE, MEMBER_ID, @@server_uuid" @@ -2642,9 +2809,9 @@ def get_member_state(self) -> Tuple[str, str]: password=self.cluster_admin_password, timeout=10, ) - except MySQLClientError: + except MySQLClientError as e: 
logger.error("Failed to get member state: mysqld daemon is down") - raise MySQLUnableToGetMemberStateError + raise MySQLUnableToGetMemberStateError from e # output is like: # [('ONLINE', @@ -2696,17 +2863,17 @@ def is_cluster_auto_rejoin_ongoing(self): return int(completed_matches.group(1)) < int(estimated_matches.group(1)) - def is_cluster_replica(self, from_instance: Optional[str] = None) -> Optional[bool]: + def is_cluster_replica(self, from_instance: str | None = None) -> bool | None: """Check if this cluster is a replica in a cluster set.""" - cs_status = self.get_cluster_set_status(extended=0, from_instance=from_instance) + cs_status = self.get_cluster_set_status(from_instance=from_instance, extended=0) if not cs_status: return return cs_status["clusters"][self.cluster_name.lower()]["clusterrole"] == "replica" - def get_cluster_set_name(self, from_instance: Optional[str] = None) -> Optional[str]: + def get_cluster_set_name(self, from_instance: str | None = None) -> str | None: """Get cluster set name.""" - cs_status = self.get_cluster_set_status(extended=0, from_instance=from_instance) + cs_status = self.get_cluster_set_status(from_instance=from_instance, extended=0) if not cs_status: return None @@ -2745,6 +2912,29 @@ def start_group_replication(self) -> None: except MySQLClientError: logger.warning("Failed to start Group Replication for unit") + def force_quorum_from_instance(self) -> None: + """Force quorum from the current instance. + + Recovery for cases where majority loss put the cluster in defunct state. 
+ """ + instance_definition = self.instance_def(self.server_config_user) + force_quorum_command = ( + f"cluster = dba.get_cluster('{self.cluster_name}')", + f"cluster.force_quorum_using_partition_of('{self.server_config_user}@" + f"{instance_definition}','{self.server_config_password}')", + ) + + try: + self._run_mysqlsh_script( + "\n".join(force_quorum_command), + user=self.server_config_user, + password=self.server_config_password, + host=instance_definition, + ) + except MySQLClientError as e: + logger.error("Failed to force quorum from instance") + raise MySQLForceQuorumFromInstanceError from e + def reboot_from_complete_outage(self) -> None: """Wrapper for reboot_cluster_from_complete_outage command.""" reboot_from_outage_command = ( @@ -2760,7 +2950,7 @@ def reboot_from_complete_outage(self) -> None: ) except MySQLClientError as e: logger.error("Failed to reboot cluster") - raise MySQLRebootFromCompleteOutageError(e.message) + raise MySQLRebootFromCompleteOutageError(e.message) from e def hold_if_recovering(self) -> None: """Hold execution if member is recovering.""" @@ -2786,9 +2976,9 @@ def set_instance_offline_mode(self, offline_mode: bool = False) -> None: user=self.server_config_user, password=self.server_config_password, ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to set instance state to offline_mode {mode}") - raise MySQLSetInstanceOfflineModeError + raise MySQLSetInstanceOfflineModeError from e def set_instance_option(self, option: str, value: Any) -> None: """Sets an instance option.""" @@ -2804,9 +2994,9 @@ def set_instance_option(self, option: str, value: Any) -> None: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error(f"Failed to set option {option} with value {value}") - raise MySQLSetInstanceOptionError + raise MySQLSetInstanceOptionError from e def offline_mode_and_hidden_instance_exists(self) -> 
bool: """Indicates whether an instance exists in offline_mode and hidden from router.""" @@ -2824,9 +3014,9 @@ def offline_mode_and_hidden_instance_exists(self) -> bool: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to query offline mode instances") - raise MySQLOfflineModeAndHiddenInstanceExistsError + raise MySQLOfflineModeAndHiddenInstanceExistsError from e matches = re.search(r"(.*)", output) @@ -2837,7 +3027,7 @@ def offline_mode_and_hidden_instance_exists(self) -> bool: def get_innodb_buffer_pool_parameters( self, available_memory: int - ) -> Tuple[int, Optional[int], Optional[int]]: + ) -> tuple[int, int | None, int | None]: """Calculate innodb buffer pool parameters for the instance.""" # Reference: based off xtradb-cluster-operator # https://github.com/percona/percona-xtradb-cluster-operator/blob/main/pkg/pxc/app/config/autotune.go#L31-L54 @@ -2876,9 +3066,11 @@ def get_innodb_buffer_pool_parameters( innodb_buffer_pool_chunk_size, group_replication_message_cache, ) - except Exception: + except Exception as e: logger.error("Failed to compute innodb buffer pool parameters") - raise MySQLGetAutoTuningParametersError("Error computing buffer pool parameters") + raise MySQLGetAutoTuningParametersError( + "Error computing buffer pool parameters" + ) from e def get_max_connections(self, available_memory: int) -> int: """Calculate max_connections parameter for the instance.""" @@ -2901,16 +3093,16 @@ def get_available_memory(self) -> int: def execute_backup_commands( self, s3_path: str, - s3_parameters: Dict[str, str], + s3_parameters: dict[str, str], xtrabackup_location: str, xbcloud_location: str, xtrabackup_plugin_dir: str, mysqld_socket_file: str, tmp_base_directory: str, defaults_config_file: str, - user: Optional[str] = None, - group: Optional[str] = None, - ) -> Tuple[str, str]: + user: str | None = None, + group: str | None = None, + ) 
-> tuple[str, str]: """Executes commands to create a backup with the given args.""" nproc_command = ["nproc"] make_temp_dir_command = f"mktemp --directory {tmp_base_directory}/xtra_backup_XXXX".split() @@ -2918,14 +3110,14 @@ def execute_backup_commands( try: nproc, _ = self._execute_commands(nproc_command) tmp_dir, _ = self._execute_commands(make_temp_dir_command, user=user, group=group) - except MySQLExecError: + except MySQLExecError as e: logger.error("Failed to execute commands prior to running backup") - raise MySQLExecuteBackupCommandsError - except Exception: + raise MySQLExecuteBackupCommandsError from e + except Exception as e: # Catch all other exceptions to prevent the database being stuck in # a bad state due to pre-backup operations logger.error("Failed unexpectedly to execute commands prior to running backup") - raise MySQLExecuteBackupCommandsError + raise MySQLExecuteBackupCommandsError from e # TODO: remove flags --no-server-version-check # when MySQL and XtraBackup versions are in sync @@ -2974,20 +3166,20 @@ def execute_backup_commands( }, stream_output="stderr", ) - except MySQLExecError: + except MySQLExecError as e: logger.error("Failed to execute backup commands") - raise MySQLExecuteBackupCommandsError - except Exception: + raise MySQLExecuteBackupCommandsError from e + except Exception as e: # Catch all other exceptions to prevent the database being stuck in # a bad state due to pre-backup operations logger.error("Failed unexpectedly to execute backup commands") - raise MySQLExecuteBackupCommandsError + raise MySQLExecuteBackupCommandsError from e def delete_temp_backup_directory( self, tmp_base_directory: str, - user: Optional[str] = None, - group: Optional[str] = None, + user: str | None = None, + group: str | None = None, ) -> None: """Delete the temp backup directory.""" delete_temp_dir_command = f"find {tmp_base_directory} -wholename {tmp_base_directory}/xtra_backup_* -delete".split() @@ -3004,21 +3196,21 @@ def 
delete_temp_backup_directory( ) except MySQLExecError as e: logger.error("Failed to delete temp backup directory") - raise MySQLDeleteTempBackupDirectoryError(e.message) - except Exception: + raise MySQLDeleteTempBackupDirectoryError(e.message) from e + except Exception as e: logger.error("Failed to delete temp backup directory") - raise MySQLDeleteTempBackupDirectoryError + raise MySQLDeleteTempBackupDirectoryError from e def retrieve_backup_with_xbcloud( self, backup_id: str, - s3_parameters: Dict[str, str], + s3_parameters: dict[str, str], temp_restore_directory: str, xbcloud_location: str, xbstream_location: str, - user: Optional[str] = None, - group: Optional[str] = None, - ) -> Tuple[str, str, str]: + user: str | None = None, + group: str | None = None, + ) -> tuple[str, str, str]: """Retrieve the specified backup from S3.""" nproc_command = ["nproc"] make_temp_dir_command = ( @@ -3035,7 +3227,7 @@ def retrieve_backup_with_xbcloud( ) except MySQLExecError as e: logger.error("Failed to execute commands prior to running xbcloud get") - raise MySQLRetrieveBackupWithXBCloudError(e.message) + raise MySQLRetrieveBackupWithXBCloudError(e.message) from e retrieve_backup_command = [ f"{xbcloud_location} get", @@ -3073,26 +3265,26 @@ def retrieve_backup_with_xbcloud( return (stdout, stderr, tmp_dir) except MySQLExecError as e: logger.error("Failed to retrieve backup") - raise MySQLRetrieveBackupWithXBCloudError(e.message) - except Exception: + raise MySQLRetrieveBackupWithXBCloudError(e.message) from e + except Exception as e: logger.error("Failed to retrieve backup") - raise MySQLRetrieveBackupWithXBCloudError + raise MySQLRetrieveBackupWithXBCloudError from e def prepare_backup_for_restore( self, backup_location: str, xtrabackup_location: str, xtrabackup_plugin_dir: str, - user: Optional[str] = None, - group: Optional[str] = None, - ) -> Tuple[str, str]: + user: str | None = None, + group: str | None = None, + ) -> tuple[str, str]: """Prepare the backup in the 
provided dir for restore.""" try: innodb_buffer_pool_size, _, _ = self.get_innodb_buffer_pool_parameters( self.get_available_memory() ) except MySQLGetAutoTuningParametersError as e: - raise MySQLPrepareBackupForRestoreError(e.message) + raise MySQLPrepareBackupForRestoreError(e.message) from e prepare_backup_command = [ xtrabackup_location, @@ -3116,16 +3308,16 @@ def prepare_backup_for_restore( ) except MySQLExecError as e: logger.error("Failed to prepare backup for restore") - raise MySQLPrepareBackupForRestoreError(e.message) - except Exception: + raise MySQLPrepareBackupForRestoreError(e.message) from e + except Exception as e: logger.error("Failed to prepare backup for restore") - raise MySQLPrepareBackupForRestoreError + raise MySQLPrepareBackupForRestoreError from e def empty_data_files( self, mysql_data_directory: str, - user: Optional[str] = None, - group: Optional[str] = None, + user: str | None = None, + group: str | None = None, ) -> None: """Empty the mysql data directory in preparation of backup restore.""" empty_data_files_command = [ @@ -3149,10 +3341,10 @@ def empty_data_files( ) except MySQLExecError as e: logger.error("Failed to empty data directory in prep for backup restore") - raise MySQLEmptyDataDirectoryError(e.message) - except Exception: + raise MySQLEmptyDataDirectoryError(e.message) from e + except Exception as e: logger.error("Failed to empty data directory in prep for backup restore") - raise MySQLEmptyDataDirectoryError + raise MySQLEmptyDataDirectoryError from e def restore_backup( self, @@ -3161,9 +3353,9 @@ def restore_backup( defaults_config_file: str, mysql_data_directory: str, xtrabackup_plugin_directory: str, - user: Optional[str] = None, - group: Optional[str] = None, - ) -> Tuple[str, str]: + user: str | None = None, + group: str | None = None, + ) -> tuple[str, str]: """Restore the provided prepared backup.""" restore_backup_command = [ xtrabackup_location, @@ -3187,21 +3379,21 @@ def restore_backup( ) except MySQLExecError 
as e: logger.error("Failed to restore backup") - raise MySQLRestoreBackupError(e.message) - except Exception: + raise MySQLRestoreBackupError(e.message) from e + except Exception as e: logger.error("Failed to restore backup") - raise MySQLRestoreBackupError + raise MySQLRestoreBackupError from e def restore_pitr( self, host: str, mysql_user: str, password: str, - s3_parameters: Dict[str, str], + s3_parameters: dict[str, str], restore_to_time: str, user: str | None = None, group: str | None = None, - ) -> Tuple[str, str]: + ) -> tuple[str, str]: """Run point-in-time-recovery using binary logs from the S3 repository. Args: @@ -3240,16 +3432,16 @@ def restore_pitr( ) except MySQLExecError as e: logger.exception("Failed to restore pitr") - raise MySQLRestorePitrError(e.message) - except Exception: + raise MySQLRestorePitrError(e.message) from e + except Exception as e: logger.exception("Failed to restore pitr") - raise MySQLRestorePitrError + raise MySQLRestorePitrError from e def delete_temp_restore_directory( self, temp_restore_directory: str, - user: Optional[str] = None, - group: Optional[str] = None, + user: str | None = None, + group: str | None = None, ) -> None: """Delete the temp restore directory from the mysql data directory.""" logger.info(f"Deleting temp restore directory in {temp_restore_directory}") @@ -3272,18 +3464,18 @@ def delete_temp_restore_directory( ) except MySQLExecError as e: logger.error("Failed to remove temp backup directory") - raise MySQLDeleteTempRestoreDirectoryError(e.message) + raise MySQLDeleteTempRestoreDirectoryError(e.message) from e @abstractmethod def _execute_commands( self, - commands: List[str], + commands: list[str], bash: bool = False, - user: Optional[str] = None, - group: Optional[str] = None, - env_extra: Dict = {}, - stream_output: Optional[str] = None, - ) -> Tuple[str, str]: + user: str | None = None, + group: str | None = None, + env_extra: dict | None = None, + stream_output: str | None = None, + ) -> tuple[str, 
str]: """Execute commands on the server where MySQL is running.""" raise NotImplementedError @@ -3309,9 +3501,9 @@ def tls_setup( user=self.server_config_user, password=self.server_config_password, ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to set custom TLS configuration") - raise MySQLTLSSetupError("Failed to set custom TLS configuration") + raise MySQLTLSSetupError("Failed to set custom TLS configuration") from e def kill_unencrypted_sessions(self) -> None: """Kill non local, non system open unencrypted connections.""" @@ -3333,9 +3525,9 @@ def kill_unencrypted_sessions(self) -> None: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to kill external sessions") - raise MySQLKillSessionError + raise MySQLKillSessionError from e def kill_client_sessions(self) -> None: """Kill non local, non system open unencrypted connections.""" @@ -3357,9 +3549,9 @@ def kill_client_sessions(self) -> None: password=self.server_config_password, host=self.instance_def(self.server_config_user), ) - except MySQLClientError: + except MySQLClientError as e: logger.error("Failed to kill external sessions") - raise MySQLKillSessionError + raise MySQLKillSessionError from e def check_mysqlcli_connection(self) -> bool: """Checks if it is possible to connect to the server with mysqlcli.""" @@ -3382,7 +3574,7 @@ def check_mysqlcli_connection(self) -> bool: logger.error("Failed to connect to MySQL with mysqlcli with default root user") return False - def get_pid_of_port_3306(self) -> Optional[str]: + def get_pid_of_port_3306(self) -> str | None: """Retrieves the PID of the process that is bound to port 3306.""" get_pid_command = ["fuser", "3306/tcp"] @@ -3392,7 +3584,7 @@ def get_pid_of_port_3306(self) -> Optional[str]: except MySQLExecError: return None - def flush_mysql_logs(self, logs_type: Union[MySQLTextLogs, list[MySQLTextLogs]]) 
-> None: + def flush_mysql_logs(self, logs_type: MySQLTextLogs | list[MySQLTextLogs]) -> None: """Flushes the specified logs_type logs.""" flush_logs_commands = [ 'session.run_sql("SET sql_log_bin = 0")', @@ -3448,12 +3640,13 @@ def get_non_system_databases(self) -> set[str]: "sys", } - def strip_off_passwords(self, input_string: Optional[str]) -> str: + def strip_off_passwords(self, input_string: str | None) -> str: """Strips off passwords from the input string.""" if not input_string: return "" stripped_input = input_string - hidden_pass = "*****" + # Not an actual pass + hidden_pass = "*****" # noqa: S105 for password in self.passwords: stripped_input = stripped_input.replace(password, hidden_pass) if "IDENTIFIED" in input_string: @@ -3494,7 +3687,7 @@ def get_current_group_replication_id(self) -> str: ) except MySQLClientError as e: logger.warning("Failed to get current group replication id", exc_info=e) - raise MySQLGetGroupReplicationIDError(e.message) + raise MySQLGetGroupReplicationIDError(e.message) from e matches = re.search(r"(.+)", output) @@ -3548,7 +3741,7 @@ def _run_mysqlsh_script( user: str, host: str, password: str, - timeout: Optional[int] = None, + timeout: int | None = None, exception_as_warning: bool = False, ) -> str: """Execute a MySQL shell script. @@ -3571,10 +3764,10 @@ def _run_mysqlsh_script( @abstractmethod def _run_mysqlcli_script( self, - script: Union[Tuple[Any, ...], List[Any]], + script: tuple[Any, ...] 
| list[Any], user: str = "root", - password: Optional[str] = None, - timeout: Optional[int] = None, + password: str | None = None, + timeout: int | None = None, exception_as_warning: bool = False, log_errors: bool = False, ) -> list: diff --git a/lib/charms/mysql/v0/s3_helpers.py b/lib/charms/mysql/v0/s3_helpers.py index 292a57cfe6..0f6d5f1630 100644 --- a/lib/charms/mysql/v0/s3_helpers.py +++ b/lib/charms/mysql/v0/s3_helpers.py @@ -21,7 +21,6 @@ import time from contextlib import nullcontext from io import BytesIO -from typing import Dict, List, Tuple import boto3 import botocore @@ -37,7 +36,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 11 +LIBPATCH = 13 S3_GROUP_REPLICATION_ID_FILE = "group_replication_id.txt" @@ -69,7 +68,7 @@ def _construct_endpoint(s3_parameters: dict) -> str: return endpoint -def _get_bucket(s3_parameters: Dict) -> boto3.resources.base.ServiceResource: +def _get_bucket(s3_parameters: dict) -> boto3.resources.base.ServiceResource: """Get an S3 bucket resource. Args: @@ -102,7 +101,7 @@ def _get_bucket(s3_parameters: Dict) -> boto3.resources.base.ServiceResource: return s3.Bucket(s3_parameters["bucket"]) -def upload_content_to_s3(content: str, content_path: str, s3_parameters: Dict) -> bool: +def upload_content_to_s3(content: str, content_path: str, s3_parameters: dict) -> bool: """Uploads the provided contents to the provided S3 bucket. 
Args: @@ -174,8 +173,8 @@ def _read_content_from_s3(content_path: str, s3_parameters: dict) -> str | None: def _compile_backups_from_file_ids( - metadata_ids: List[str], md5_ids: List[str], log_ids: List[str] -) -> List[Tuple[str, str]]: + metadata_ids: list[str], md5_ids: list[str], log_ids: list[str] +) -> list[tuple[str, str]]: """Helper function that compiles tuples of (backup_id, status) from file ids.""" backups = [] for backup_id in metadata_ids: @@ -190,7 +189,7 @@ def _compile_backups_from_file_ids( return backups -def list_backups_in_s3_path(s3_parameters: Dict) -> List[Tuple[str, str]]: # noqa: C901 +def list_backups_in_s3_path(s3_parameters: dict) -> list[tuple[str, str]]: """Retrieve subdirectories in an S3 path. Args: @@ -254,7 +253,7 @@ def list_backups_in_s3_path(s3_parameters: Dict) -> List[Tuple[str, str]]: # no # set a more meaningful error message. if e.response["Error"]["Code"] == "NoSuchBucket": message = f"Bucket {s3_parameters['bucket']} does not exist" - setattr(e, "message", message) + e.message = message raise except (KeyError, AttributeError): pass @@ -265,7 +264,7 @@ def list_backups_in_s3_path(s3_parameters: Dict) -> List[Tuple[str, str]]: # no raise -def fetch_and_check_existence_of_s3_path(path: str, s3_parameters: Dict[str, str]) -> bool: +def fetch_and_check_existence_of_s3_path(path: str, s3_parameters: dict[str, str]) -> bool: """Checks the existence of a provided S3 path by fetching the object. Args: @@ -300,7 +299,7 @@ def fetch_and_check_existence_of_s3_path(path: str, s3_parameters: Dict[str, str def ensure_s3_compatible_group_replication_id( - group_replication_id: str, s3_parameters: Dict[str, str] + group_replication_id: str, s3_parameters: dict[str, str] ) -> bool: """Checks if group replication id is equal to the one in the provided S3 repository. 
diff --git a/lib/charms/mysql/v0/tls.py b/lib/charms/mysql/v0/tls.py index 7effc1ad95..61ec7b0464 100644 --- a/lib/charms/mysql/v0/tls.py +++ b/lib/charms/mysql/v0/tls.py @@ -24,7 +24,6 @@ import re import socket import typing -from typing import List, Optional, Tuple import ops from charms.mysql.v0.mysql import MySQLKillSessionError, MySQLTLSSetupError @@ -35,10 +34,6 @@ generate_csr, generate_private_key, ) -from ops.charm import ActionEvent -from ops.framework import Object -from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus - from constants import ( MYSQL_DATA_DIR, TLS_RELATION, @@ -46,12 +41,15 @@ TLS_SSL_CERT_FILE, TLS_SSL_KEY_FILE, ) +from ops.charm import ActionEvent +from ops.framework import Object +from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus logger = logging.getLogger(__name__) LIBID = "eb73947deedd4380a3a90d527e0878eb" LIBAPI = 0 -LIBPATCH = 8 +LIBPATCH = 10 SCOPE = "unit" @@ -187,12 +185,9 @@ def _on_tls_relation_broken(self, _) -> None: # ======================= # Helpers # ======================= - def _request_certificate(self, param: Optional[str]): + def _request_certificate(self, param: str | None): """Request a certificate to TLS Certificates Operator.""" - if param is None: - key = generate_private_key() - else: - key = self._parse_tls_file(param) + key = generate_private_key() if param is None else self._parse_tls_file(param) csr = generate_csr( private_key=key, @@ -220,7 +215,7 @@ def _parse_tls_file(raw_content: str) -> bytes: ).encode("utf-8") return base64.b64decode(raw_content) - def _get_sans(self) -> List[str]: + def _get_sans(self) -> list[str]: """Create a list of DNS names for a unit. 
Returns: @@ -233,7 +228,7 @@ def _get_sans(self) -> List[str]: str(self.charm.model.get_binding(self.charm.peers).network.bind_address), ] - def get_tls_content(self) -> Tuple[Optional[str], Optional[str], Optional[str]]: + def get_tls_content(self) -> tuple[str | None, str | None, str | None]: """Retrieve TLS content. Return TLS files as required by mysql. diff --git a/poetry.lock b/poetry.lock index 1f2322d5d0..9171be77b0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. [[package]] name = "allure-pytest" @@ -68,7 +68,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.23)"] [[package]] @@ -87,8 +87,8 @@ files = [ six = ">=1.12.0" [package.extras] -astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] -test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] +astroid = ["astroid (>=1,<2) ; python_version < \"3\"", "astroid (>=2,<4) ; python_version >= \"3\""] +test = ["astroid (>=1,<2) ; python_version < \"3\"", "astroid (>=2,<4) ; python_version >= \"3\"", "pytest"] [[package]] name = "attrs" @@ -107,8 +107,8 @@ cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] dev = ["attrs[tests]", "pre-commit"] docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", 
"sphinxcontrib-towncrier", "towncrier", "zope-interface"] tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.6) ; platform_python_implementation == \"CPython\" and python_version >= \"3.8\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.8\""] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] [[package]] name = "backoff" @@ -129,7 +129,7 @@ description = "Base class for creating enumerated constants that are also subcla optional = false python-versions = ">=3.8.6,<3.11" groups = ["integration"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.10\"" files = [ {file = "backports_strenum-1.3.1-py3-none-any.whl", hash = "sha256:cdcfe36dc897e2615dc793b7d3097f54d359918fc448754a517e6f23044ccf83"}, {file = "backports_strenum-1.3.1.tar.gz", hash = "sha256:77c52407342898497714f0596e86188bb7084f89063226f4ba66863482f42414"}, @@ -421,7 +421,7 @@ files = [ [package.extras] dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] hard-encoding-detection = ["chardet"] -toml = ["tomli"] +toml = ["tomli ; python_version < \"3.11\""] types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] [[package]] @@ -523,7 +523,7 @@ files = [ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cryptography" @@ -612,7 +612,7 @@ description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main", 
"charm-libs", "integration", "unit"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.10\"" files = [ {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, @@ -634,7 +634,7 @@ files = [ ] [package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] [[package]] name = "google-auth" @@ -732,7 +732,7 @@ idna = "*" sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -785,7 +785,7 @@ zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\"", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "iniconfig" @@ -844,7 +844,7 @@ typing-extensions = 
{version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] -doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"] +doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli ; python_version < \"3.11\"", "typing-extensions"] kernel = ["ipykernel"] matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] @@ -1374,8 +1374,8 @@ cryptography = ">=3.3" pynacl = ">=1.5" [package.extras] -all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] -gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +all = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] +gssapi = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8) ; platform_system == \"Windows\""] invoke = ["invoke (>=2.0)"] [[package]] @@ -1908,111 +1908,115 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] [[package]] name = "rpds-py" -version = "0.18.1" +version = "0.20.1" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" groups = ["charm-libs"] files = [ - {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"}, - {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"}, - {file = 
"rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"}, - {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"}, - {file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, - {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, - {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, - {file = 
"rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, - {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, - {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, - {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, - {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, - {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, - {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash 
= "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, - {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, + {file = "rpds_py-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a649dfd735fff086e8a9d0503a9f0c7d01b7912a333c7ae77e1515c08c146dad"}, + {file = "rpds_py-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f16bc1334853e91ddaaa1217045dd7be166170beec337576818461268a3de67f"}, + {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14511a539afee6f9ab492b543060c7491c99924314977a55c98bfa2ee29ce78c"}, + {file = 
"rpds_py-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3ccb8ac2d3c71cda472b75af42818981bdacf48d2e21c36331b50b4f16930163"}, + {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c142b88039b92e7e0cb2552e8967077e3179b22359e945574f5e2764c3953dcf"}, + {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f19169781dddae7478a32301b499b2858bc52fc45a112955e798ee307e294977"}, + {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13c56de6518e14b9bf6edde23c4c39dac5b48dcf04160ea7bce8fca8397cdf86"}, + {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:925d176a549f4832c6f69fa6026071294ab5910e82a0fe6c6228fce17b0706bd"}, + {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:78f0b6877bfce7a3d1ff150391354a410c55d3cdce386f862926a4958ad5ab7e"}, + {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dd645e2b0dcb0fd05bf58e2e54c13875847687d0b71941ad2e757e5d89d4356"}, + {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4f676e21db2f8c72ff0936f895271e7a700aa1f8d31b40e4e43442ba94973899"}, + {file = "rpds_py-0.20.1-cp310-none-win32.whl", hash = "sha256:648386ddd1e19b4a6abab69139b002bc49ebf065b596119f8f37c38e9ecee8ff"}, + {file = "rpds_py-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:d9ecb51120de61e4604650666d1f2b68444d46ae18fd492245a08f53ad2b7711"}, + {file = "rpds_py-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:762703bdd2b30983c1d9e62b4c88664df4a8a4d5ec0e9253b0231171f18f6d75"}, + {file = "rpds_py-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0b581f47257a9fce535c4567782a8976002d6b8afa2c39ff616edf87cbeff712"}, + {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:842c19a6ce894493563c3bd00d81d5100e8e57d70209e84d5491940fdb8b9e3a"}, + {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42cbde7789f5c0bcd6816cb29808e36c01b960fb5d29f11e052215aa85497c93"}, + {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c8e9340ce5a52f95fa7d3b552b35c7e8f3874d74a03a8a69279fd5fca5dc751"}, + {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ba6f89cac95c0900d932c9efb7f0fb6ca47f6687feec41abcb1bd5e2bd45535"}, + {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a916087371afd9648e1962e67403c53f9c49ca47b9680adbeef79da3a7811b0"}, + {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:200a23239781f46149e6a415f1e870c5ef1e712939fe8fa63035cd053ac2638e"}, + {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58b1d5dd591973d426cbb2da5e27ba0339209832b2f3315928c9790e13f159e8"}, + {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6b73c67850ca7cae0f6c56f71e356d7e9fa25958d3e18a64927c2d930859b8e4"}, + {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d8761c3c891cc51e90bc9926d6d2f59b27beaf86c74622c8979380a29cc23ac3"}, + {file = "rpds_py-0.20.1-cp311-none-win32.whl", hash = "sha256:cd945871335a639275eee904caef90041568ce3b42f402c6959b460d25ae8732"}, + {file = "rpds_py-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:7e21b7031e17c6b0e445f42ccc77f79a97e2687023c5746bfb7a9e45e0921b84"}, + {file = "rpds_py-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:36785be22066966a27348444b40389f8444671630063edfb1a2eb04318721e17"}, + {file = "rpds_py-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:142c0a5124d9bd0e2976089484af5c74f47bd3298f2ed651ef54ea728d2ea42c"}, + {file = 
"rpds_py-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbddc10776ca7ebf2a299c41a4dde8ea0d8e3547bfd731cb87af2e8f5bf8962d"}, + {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15a842bb369e00295392e7ce192de9dcbf136954614124a667f9f9f17d6a216f"}, + {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be5ef2f1fc586a7372bfc355986226484e06d1dc4f9402539872c8bb99e34b01"}, + {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbcf360c9e3399b056a238523146ea77eeb2a596ce263b8814c900263e46031a"}, + {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd27a66740ffd621d20b9a2f2b5ee4129a56e27bfb9458a3bcc2e45794c96cb"}, + {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0b937b2a1988f184a3e9e577adaa8aede21ec0b38320d6009e02bd026db04fa"}, + {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6889469bfdc1eddf489729b471303739bf04555bb151fe8875931f8564309afc"}, + {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:19b73643c802f4eaf13d97f7855d0fb527fbc92ab7013c4ad0e13a6ae0ed23bd"}, + {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3c6afcf2338e7f374e8edc765c79fbcb4061d02b15dd5f8f314a4af2bdc7feb5"}, + {file = "rpds_py-0.20.1-cp312-none-win32.whl", hash = "sha256:dc73505153798c6f74854aba69cc75953888cf9866465196889c7cdd351e720c"}, + {file = "rpds_py-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:8bbe951244a838a51289ee53a6bae3a07f26d4e179b96fc7ddd3301caf0518eb"}, + {file = "rpds_py-0.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6ca91093a4a8da4afae7fe6a222c3b53ee4eef433ebfee4d54978a103435159e"}, + {file = "rpds_py-0.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:b9c2fe36d1f758b28121bef29ed1dee9b7a2453e997528e7d1ac99b94892527c"}, + {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f009c69bc8c53db5dfab72ac760895dc1f2bc1b62ab7408b253c8d1ec52459fc"}, + {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6740a3e8d43a32629bb9b009017ea5b9e713b7210ba48ac8d4cb6d99d86c8ee8"}, + {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32b922e13d4c0080d03e7b62991ad7f5007d9cd74e239c4b16bc85ae8b70252d"}, + {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe00a9057d100e69b4ae4a094203a708d65b0f345ed546fdef86498bf5390982"}, + {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fe9b04b6fa685bd39237d45fad89ba19e9163a1ccaa16611a812e682913496"}, + {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa7ac11e294304e615b43f8c441fee5d40094275ed7311f3420d805fde9b07b4"}, + {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aa97af1558a9bef4025f8f5d8c60d712e0a3b13a2fe875511defc6ee77a1ab7"}, + {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:483b29f6f7ffa6af845107d4efe2e3fa8fb2693de8657bc1849f674296ff6a5a"}, + {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37fe0f12aebb6a0e3e17bb4cd356b1286d2d18d2e93b2d39fe647138458b4bcb"}, + {file = "rpds_py-0.20.1-cp313-none-win32.whl", hash = "sha256:a624cc00ef2158e04188df5e3016385b9353638139a06fb77057b3498f794782"}, + {file = "rpds_py-0.20.1-cp313-none-win_amd64.whl", hash = "sha256:b71b8666eeea69d6363248822078c075bac6ed135faa9216aa85f295ff009b1e"}, + {file = "rpds_py-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5b48e790e0355865197ad0aca8cde3d8ede347831e1959e158369eb3493d2191"}, + {file = 
"rpds_py-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3e310838a5801795207c66c73ea903deda321e6146d6f282e85fa7e3e4854804"}, + {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249280b870e6a42c0d972339e9cc22ee98730a99cd7f2f727549af80dd5a963"}, + {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e79059d67bea28b53d255c1437b25391653263f0e69cd7dec170d778fdbca95e"}, + {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b431c777c9653e569986ecf69ff4a5dba281cded16043d348bf9ba505486f36"}, + {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da584ff96ec95e97925174eb8237e32f626e7a1a97888cdd27ee2f1f24dd0ad8"}, + {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a0629ec053fc013808a85178524e3cb63a61dbc35b22499870194a63578fb9"}, + {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fbf15aff64a163db29a91ed0868af181d6f68ec1a3a7d5afcfe4501252840bad"}, + {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:07924c1b938798797d60c6308fa8ad3b3f0201802f82e4a2c41bb3fafb44cc28"}, + {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4a5a844f68776a7715ecb30843b453f07ac89bad393431efbf7accca3ef599c1"}, + {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:518d2ca43c358929bf08f9079b617f1c2ca6e8848f83c1225c88caeac46e6cbc"}, + {file = "rpds_py-0.20.1-cp38-none-win32.whl", hash = "sha256:3aea7eed3e55119635a74bbeb80b35e776bafccb70d97e8ff838816c124539f1"}, + {file = "rpds_py-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:7dca7081e9a0c3b6490a145593f6fe3173a94197f2cb9891183ef75e9d64c425"}, + {file = "rpds_py-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b41b6321805c472f66990c2849e152aff7bc359eb92f781e3f606609eac877ad"}, + {file = 
"rpds_py-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a90c373ea2975519b58dece25853dbcb9779b05cc46b4819cb1917e3b3215b6"}, + {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16d4477bcb9fbbd7b5b0e4a5d9b493e42026c0bf1f06f723a9353f5153e75d30"}, + {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84b8382a90539910b53a6307f7c35697bc7e6ffb25d9c1d4e998a13e842a5e83"}, + {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4888e117dd41b9d34194d9e31631af70d3d526efc363085e3089ab1a62c32ed1"}, + {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5265505b3d61a0f56618c9b941dc54dc334dc6e660f1592d112cd103d914a6db"}, + {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e75ba609dba23f2c95b776efb9dd3f0b78a76a151e96f96cc5b6b1b0004de66f"}, + {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1791ff70bc975b098fe6ecf04356a10e9e2bd7dc21fa7351c1742fdeb9b4966f"}, + {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d126b52e4a473d40232ec2052a8b232270ed1f8c9571aaf33f73a14cc298c24f"}, + {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c14937af98c4cc362a1d4374806204dd51b1e12dded1ae30645c298e5a5c4cb1"}, + {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3d089d0b88996df627693639d123c8158cff41c0651f646cd8fd292c7da90eaf"}, + {file = "rpds_py-0.20.1-cp39-none-win32.whl", hash = "sha256:653647b8838cf83b2e7e6a0364f49af96deec64d2a6578324db58380cff82aca"}, + {file = "rpds_py-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:fa41a64ac5b08b292906e248549ab48b69c5428f3987b09689ab2441f267d04d"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a07ced2b22f0cf0b55a6a510078174c31b6d8544f3bc00c2bcee52b3d613f74"}, + {file 
= "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68cb0a499f2c4a088fd2f521453e22ed3527154136a855c62e148b7883b99f9a"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3060d885657abc549b2a0f8e1b79699290e5d83845141717c6c90c2df38311"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95f3b65d2392e1c5cec27cff08fdc0080270d5a1a4b2ea1d51d5f4a2620ff08d"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cc3712a4b0b76a1d45a9302dd2f53ff339614b1c29603a911318f2357b04dd2"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d4eea0761e37485c9b81400437adb11c40e13ef513375bbd6973e34100aeb06"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f5179583d7a6cdb981151dd349786cbc318bab54963a192692d945dd3f6435d"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fbb0ffc754490aff6dabbf28064be47f0f9ca0b9755976f945214965b3ace7e"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:a94e52537a0e0a85429eda9e49f272ada715506d3b2431f64b8a3e34eb5f3e75"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:92b68b79c0da2a980b1c4197e56ac3dd0c8a149b4603747c4378914a68706979"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:93da1d3db08a827eda74356f9f58884adb254e59b6664f64cc04cdff2cc19b0d"}, + {file = "rpds_py-0.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:754bbed1a4ca48479e9d4182a561d001bbf81543876cdded6f695ec3d465846b"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca449520e7484534a2a44faf629362cae62b660601432d04c482283c47eaebab"}, + {file = 
"rpds_py-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9c4cb04a16b0f199a8c9bf807269b2f63b7b5b11425e4a6bd44bd6961d28282c"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb63804105143c7e24cee7db89e37cb3f3941f8e80c4379a0b355c52a52b6780"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55cd1fa4ecfa6d9f14fbd97ac24803e6f73e897c738f771a9fe038f2f11ff07c"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f8f741b6292c86059ed175d80eefa80997125b7c478fb8769fd9ac8943a16c0"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fc212779bf8411667234b3cdd34d53de6c2b8b8b958e1e12cb473a5f367c338"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ad56edabcdb428c2e33bbf24f255fe2b43253b7d13a2cdbf05de955217313e6"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a3a1e9ee9728b2c1734f65d6a1d376c6f2f6fdcc13bb007a08cc4b1ff576dc5"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e13de156137b7095442b288e72f33503a469aa1980ed856b43c353ac86390519"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:07f59760ef99f31422c49038964b31c4dfcfeb5d2384ebfc71058a7c9adae2d2"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:59240685e7da61fb78f65a9f07f8108e36a83317c53f7b276b4175dc44151684"}, + {file = "rpds_py-0.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:83cba698cfb3c2c5a7c3c6bac12fe6c6a51aae69513726be6411076185a8b24a"}, + {file = "rpds_py-0.20.1.tar.gz", hash = "sha256:e1791c4aabd117653530dccd24108fa03cc6baf21f58b950d0a73c3b3b29a350"}, ] [[package]] @@ -2032,29 +2036,31 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = 
"0.4.8" +version = "0.12.9" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" groups = ["format", "lint"] files = [ - {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"}, - {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"}, - {file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"}, - {file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"}, - {file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"}, - {file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"}, + {file = "ruff-0.12.9-py3-none-linux_armv6l.whl", hash = "sha256:fcebc6c79fcae3f220d05585229463621f5dbf24d79fdc4936d9302e177cfa3e"}, + {file = "ruff-0.12.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aed9d15f8c5755c0e74467731a007fcad41f19bcce41cd75f768bbd687f8535f"}, + {file = "ruff-0.12.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5b15ea354c6ff0d7423814ba6d44be2807644d0c05e9ed60caca87e963e93f70"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d596c2d0393c2502eaabfef723bd74ca35348a8dac4267d18a94910087807c53"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b15599931a1a7a03c388b9c5df1bfa62be7ede6eb7ef753b272381f39c3d0ff"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d02faa2977fb6f3f32ddb7828e212b7dd499c59eb896ae6c03ea5c303575756"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:17d5b6b0b3a25259b69ebcba87908496e6830e03acfb929ef9fd4c58675fa2ea"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72db7521860e246adbb43f6ef464dd2a532ef2ef1f5dd0d470455b8d9f1773e0"}, + {file = 
"ruff-0.12.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a03242c1522b4e0885af63320ad754d53983c9599157ee33e77d748363c561ce"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fc83e4e9751e6c13b5046d7162f205d0a7bac5840183c5beebf824b08a27340"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:881465ed56ba4dd26a691954650de6ad389a2d1fdb130fe51ff18a25639fe4bb"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:43f07a3ccfc62cdb4d3a3348bf0588358a66da756aa113e071b8ca8c3b9826af"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:07adb221c54b6bba24387911e5734357f042e5669fa5718920ee728aba3cbadc"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f5cd34fabfdea3933ab85d72359f118035882a01bff15bd1d2b15261d85d5f66"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f6be1d2ca0686c54564da8e7ee9e25f93bdd6868263805f8c0b8fc6a449db6d7"}, + {file = "ruff-0.12.9-py3-none-win32.whl", hash = "sha256:cc7a37bd2509974379d0115cc5608a1a4a6c4bff1b452ea69db83c8855d53f93"}, + {file = "ruff-0.12.9-py3-none-win_amd64.whl", hash = "sha256:6fb15b1977309741d7d098c8a3cb7a30bc112760a00fb6efb7abc85f00ba5908"}, + {file = "ruff-0.12.9-py3-none-win_arm64.whl", hash = "sha256:63c8c819739d86b96d500cce885956a1a48ab056bbcbc61b747ad494b2485089"}, + {file = "ruff-0.12.9.tar.gz", hash = "sha256:fbd94b2e3c623f659962934e52c2bea6fc6da11f667a427a368adaf3af2c866a"}, ] [[package]] @@ -2160,7 +2166,7 @@ files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] -markers = {integration = "python_version < \"3.11\"", unit = "python_full_version <= \"3.11.0a6\""} +markers = {integration = "python_version == \"3.10\"", unit = "python_full_version 
<= \"3.11.0a6\""} [[package]] name = "toposort" @@ -2201,7 +2207,7 @@ files = [ {file = "typing_extensions-4.12.1-py3-none-any.whl", hash = "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a"}, {file = "typing_extensions-4.12.1.tar.gz", hash = "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"}, ] -markers = {main = "python_version < \"3.11\""} +markers = {main = "python_version == \"3.10\""} [[package]] name = "typing-inspect" @@ -2232,7 +2238,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -2439,9 +2445,9 @@ files = [ [package.extras] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [metadata] lock-version = "2.1" python-versions = "^3.10" -content-hash = "d01675fadc9a418243f95fd4258b4c54ff7511ca62aa377ac47581d0753e160a" +content-hash = "ad22dc7a8a04e3e49c16d019c0e7342cf7faada00c827a8a939a6865b87aa760" diff --git a/pyproject.toml b/pyproject.toml index 3980d1b868..d4693c4114 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,20 +36,20 @@ opentelemetry-exporter-otlp-proto-http = "1.21.0" optional = 
true [tool.poetry.group.format.dependencies] -ruff = "^0.4.5" +ruff = "^0.12.7" [tool.poetry.group.lint] optional = true [tool.poetry.group.lint.dependencies] -ruff = "^0.4.5" +ruff = "^0.12.7" codespell = "^2.3.0" shellcheck-py = "^0.9.0.5" [tool.poetry.group.unit.dependencies] pytest = "^7.4.0" pytest-mock = "^3.11.1" -coverage = {extras = ["toml"], version = "^7.2.7"} +coverage = { extras = ["toml"], version = "^7.2.7" } parameterized = "^0.9.0" [tool.poetry.group.integration.dependencies] @@ -90,14 +90,37 @@ line-length = 99 [tool.ruff.lint] explicit-preview-rules = true -select = ["A", "E", "W", "F", "C", "N", "D", "I", "CPY001"] +select = [ + "A", + "E", + "W", + "F", + "C", + "N", + "D", + "I", + "B", + "CPY001", + "RUF", + "S", + "SIM", + "UP", + "TC", +] ignore = [ - "D107", # Ignore D107 Missing docstring in __init__ - "E501", # Ignore E501 Line too long + "D107", # Ignore D107 Missing docstring in __init__ + "E501", # Ignore E501 Line too long ] [tool.ruff.lint.per-file-ignores] -"tests/*" = ["D1"] +"tests/*" = [ + "D1", + "D417", + # Asserts + "B011", + # Disable security checks for tests + "S", +] [tool.ruff.lint.flake8-copyright] # Check for properly formatted copyright header in each file diff --git a/scripts/log_rotate_dispatcher.py b/scripts/log_rotate_dispatcher.py index 25ed141b65..4e45834bc5 100644 --- a/scripts/log_rotate_dispatcher.py +++ b/scripts/log_rotate_dispatcher.py @@ -15,9 +15,9 @@ def dispatch(unit: str, charm_directory: str): juju_run = shutil.which("juju-run") juju_exec = shutil.which("juju-exec") - command = juju_exec or juju_run + command = juju_exec or juju_run or "" - subprocess.run( + subprocess.run( # noqa: S603 [ command, "-u", diff --git a/src/charm.py b/src/charm.py index b9ae419c26..c9136ba423 100755 --- a/src/charm.py +++ b/src/charm.py @@ -462,7 +462,7 @@ def join_unit_to_cluster(self) -> None: instance_label in self._mysql.get_cluster_status(from_instance=cluster_primary)[ "defaultreplicaset" - ]["topology"].keys() + 
]["topology"] ): self._mysql.execute_remove_instance( connect_instance=cluster_primary, force=True @@ -594,7 +594,7 @@ def _on_peer_relation_joined(self, _) -> None: self.unit_peer_data.setdefault("member-role", "unknown") self.unit_peer_data.setdefault("member-state", "waiting") - def _on_config_changed(self, _: EventBase) -> None: # noqa: C901 + def _on_config_changed(self, _: EventBase) -> None: """Handle the config changed event.""" container = self.unit.get_container(CONTAINER_NAME) if not container.can_connect(): @@ -624,20 +624,20 @@ def _on_config_changed(self, _: EventBase) -> None: # noqa: C901 new_config_dict = self._write_mysqld_configuration() changed_config = compare_dictionaries(previous_config_dict, new_config_dict) - if self.mysql_config.keys_requires_restart(changed_config): - # there are static configurations in changed keys - - if self._mysql.is_mysqld_running(): - logger.info("Configuration change requires restart") - if "loose-audit_log_format" in changed_config: - # plugins are manipulated running daemon - if self.config.plugin_audit_enabled: - self._mysql.install_plugins(["audit_log"]) - else: - self._mysql.uninstall_plugins(["audit_log"]) - # restart the service - self.on[f"{self.restart.name}"].acquire_lock.emit() - return + if ( + self.mysql_config.keys_requires_restart(changed_config) + and self._mysql.is_mysqld_running() + ): + logger.info("Configuration change requires restart") + if "loose-audit_log_format" in changed_config: + # plugins are manipulated running daemon + if self.config.plugin_audit_enabled: + self._mysql.install_plugins(["audit_log"]) + else: + self._mysql.uninstall_plugins(["audit_log"]) + # restart the service + self.on[f"{self.restart.name}"].acquire_lock.emit() + return if dynamic_config := self.mysql_config.filter_static_keys(changed_config): # if only dynamic config changed, apply it @@ -852,7 +852,6 @@ def _on_mysql_pebble_ready(self, event) -> None: MySQLUnableToGetMemberStateError, MySQLNoMemberStateError, 
MySQLInitializeJujuOperationsTableError, - MySQLCreateClusterError, ): logger.exception("Failed to initialize primary") raise @@ -926,8 +925,7 @@ def _handle_potential_cluster_crash_scenario(self) -> bool: # noqa: C901 self.unit.status = ActiveStatus(self.active_status_message) else: self.unit.status = BlockedStatus("failed to recover cluster.") - finally: - return True + return True if self._mysql.is_cluster_auto_rejoin_ongoing(): logger.info("Cluster auto-rejoin attempts are still ongoing.") diff --git a/src/config.py b/src/config.py index d110076ec8..97bf64ff2d 100644 --- a/src/config.py +++ b/src/config.py @@ -7,7 +7,7 @@ import configparser import logging import re -from typing import Optional +from typing import ClassVar, Optional from charms.data_platform_libs.v0.data_models import BaseConfigModel from charms.mysql.v0.mysql import MAX_CONNECTIONS_FLOOR @@ -20,7 +20,7 @@ class MySQLConfig: """Configuration.""" # Static config requires workload restart - static_config = { + static_config: ClassVar[set[str]] = { "innodb_buffer_pool_size", "innodb_buffer_pool_chunk_size", "group_replication_message_cache_size", diff --git a/src/constants.py b/src/constants.py index 91a64c64c9..af10557b98 100644 --- a/src/constants.py +++ b/src/constants.py @@ -16,11 +16,11 @@ DB_RELATION_NAME = "database" LEGACY_MYSQL = "mysql" LEGACY_MYSQL_ROOT = "mysql-root" -ROOT_PASSWORD_KEY = "root-password" -SERVER_CONFIG_PASSWORD_KEY = "server-config-password" -CLUSTER_ADMIN_PASSWORD_KEY = "cluster-admin-password" -MONITORING_PASSWORD_KEY = "monitoring-password" -BACKUPS_PASSWORD_KEY = "backups-password" +ROOT_PASSWORD_KEY = "root-password" # noqa: S105 +SERVER_CONFIG_PASSWORD_KEY = "server-config-password" # noqa: S105 +CLUSTER_ADMIN_PASSWORD_KEY = "cluster-admin-password" # noqa: S105 +MONITORING_PASSWORD_KEY = "monitoring-password" # noqa: S105 +BACKUPS_PASSWORD_KEY = "backups-password" # noqa: S105 CONTAINER_RESTARTS = "unit-container-restarts" UNIT_ENDPOINTS_KEY = "unit-endpoints" 
TLS_RELATION = "certificates" @@ -31,7 +31,6 @@ MYSQLSH_LOCATION = "/usr/bin/mysqlsh" MYSQL_DATA_DIR = "/var/lib/mysql" MYSQLD_SOCK_FILE = "/var/run/mysqld/mysqld.sock" -MYSQLSH_SCRIPT_FILE = "/tmp/script.py" MYSQLD_CONFIG_FILE = "/etc/mysql/mysql.conf.d/z-custom.cnf" MYSQLD_INIT_CONFIG_FILE = "/etc/mysql/mysql.conf.d/z-custom-init-file.cnf" MYSQL_LOG_DIR = "/var/log/mysql" diff --git a/src/k8s_helpers.py b/src/k8s_helpers.py index a77cb37810..edf8642475 100644 --- a/src/k8s_helpers.py +++ b/src/k8s_helpers.py @@ -92,7 +92,7 @@ def create_endpoint_services(self, roles: List[str]) -> None: return else: logger.exception("Kubernetes service creation failed: %s", e) - raise KubernetesClientError + raise KubernetesClientError from e def delete_endpoint_services(self, roles: List[str]) -> None: """Delete kubernetes service for endpoints. @@ -146,7 +146,7 @@ def label_pod(self, role: str, pod_name: Optional[str] = None) -> None: logger.error("Kubernetes pod label creation failed: `juju trust` needed") else: logger.exception("Kubernetes pod label creation failed: %s", e) - raise KubernetesClientError + raise KubernetesClientError from e def get_resources_limits(self, container_name: str) -> Dict: """Return resources limits for a given container. @@ -161,16 +161,16 @@ def get_resources_limits(self, container_name: str) -> Dict: if container.name == container_name: return container.resources.limits or {} return {} - except ApiError: - raise KubernetesClientError + except ApiError as e: + raise KubernetesClientError from e def _get_node_name_for_pod(self) -> str: """Return the node name for a given pod.""" try: pod = self.client.get(Pod, name=self.pod_name, namespace=self.namespace) return pod.spec.nodeName - except ApiError: - raise KubernetesClientError + except ApiError as e: + raise KubernetesClientError from e def get_node_allocable_memory(self) -> int: """Return the allocable memory in bytes for a given node. 
@@ -183,8 +183,8 @@ def get_node_allocable_memory(self) -> int: Node, name=self._get_node_name_for_pod(), namespace=self.namespace ) return any_memory_to_bytes(node.status.allocatable["memory"]) - except ApiError: - raise KubernetesClientError + except ApiError as e: + raise KubernetesClientError from e @retry(stop=stop_after_attempt(60), wait=wait_fixed(1), reraise=True) def wait_service_ready(self, service_endpoint: Tuple[str, int]) -> None: @@ -221,4 +221,4 @@ def set_rolling_update_partition(self, partition: int) -> None: logger.error("Kubernetes statefulset patch failed: `juju trust` needed") else: logger.exception("Kubernetes statefulset patch failed") - raise KubernetesClientError + raise KubernetesClientError from None diff --git a/src/log_rotate_manager.py b/src/log_rotate_manager.py index 2daada89dd..41e1fb4da1 100644 --- a/src/log_rotate_manager.py +++ b/src/log_rotate_manager.py @@ -59,7 +59,7 @@ def start_log_rotate_manager(self): # Use Popen instead of run as the log rotate dispatcher is a long running # process that shouldn't block the event handler - process = subprocess.Popen( + process = subprocess.Popen( # noqa: S603 [ "/usr/bin/python3", "scripts/log_rotate_dispatcher.py", diff --git a/src/mysql_k8s_helpers.py b/src/mysql_k8s_helpers.py index 9cd1978bd7..cfb200c6e6 100644 --- a/src/mysql_k8s_helpers.py +++ b/src/mysql_k8s_helpers.py @@ -189,7 +189,6 @@ def fix_data_dir(self, container: Container) -> None: is available we fix permissions incorrectly with chown. 
""" paths = container.list_files(MYSQL_DATA_DIR, itself=True) - assert len(paths) == 1, "list_files doesn't return only directory itself" logger.debug(f"Data directory ownership: {paths[0].user}:{paths[0].group}") if paths[0].user != MYSQL_SYSTEM_USER or paths[0].group != MYSQL_SYSTEM_GROUP: logger.debug(f"Changing ownership to {MYSQL_SYSTEM_USER}:{MYSQL_SYSTEM_GROUP}") @@ -203,7 +202,7 @@ def fix_data_dir(self, container: Container) -> None: process.wait() except ExecError as e: logger.error(f"Exited with code {e.exit_code}. Stderr:\n{e.stderr}") - raise MySQLInitialiseMySQLDError(e.stderr or "") + raise MySQLInitialiseMySQLDError(e.stderr or "") from None @retry(reraise=True, stop=stop_after_delay(30), wait=wait_fixed(5)) def initialise_mysqld(self) -> None: @@ -229,7 +228,7 @@ def initialise_mysqld(self) -> None: except (ExecError, ChangeError, PathError, TimeoutError): logger.exception("Failed to initialise MySQL data directory") self.reset_data_dir() - raise MySQLInitialiseMySQLDError + raise MySQLInitialiseMySQLDError from None def reset_root_password_and_start_mysqld(self) -> None: """Reset the root user password and start mysqld.""" @@ -286,7 +285,7 @@ def wait_until_mysql_connection(self, check_port: bool = True) -> None: if check_port and not self.check_mysqlcli_connection(): raise MySQLServiceNotRunningError("Connection with mysqlcli not possible") except MySQLClientError: - raise MySQLServiceNotRunningError + raise MySQLServiceNotRunningError from None logger.debug("MySQL connection possible") @@ -302,7 +301,7 @@ def setup_logrotate_config( # days * minutes/day = amount of rotated files to keep logs_rotations = logs_retention_period * 1440 - with open("templates/logrotate.j2", "r") as file: + with open("templates/logrotate.j2") as file: template = jinja2.Template(file.read()) rendered = template.render( @@ -473,7 +472,7 @@ def create_database(self, database_name: str) -> None: ) except MySQLClientError as e: logger.exception(f"Failed to create database 
{database_name}", exc_info=e) - raise MySQLCreateDatabaseError(e.message) + raise MySQLCreateDatabaseError(e.message) from None def create_user(self, username: str, password: str, label: str, hostname: str = "%") -> None: """Creates a new user. @@ -505,7 +504,7 @@ def create_user(self, username: str, password: str, label: str, hostname: str = ) except MySQLClientError as e: logger.exception(f"Failed to create user {username}@{hostname}") - raise MySQLCreateUserError(e.message) + raise MySQLCreateUserError(e.message) from None def escalate_user_privileges(self, username: str, hostname: str = "%") -> None: """Escalates the provided user's privileges. @@ -545,11 +544,8 @@ def escalate_user_privileges(self, username: str, hostname: str = "%") -> None: password=self.server_config_password, ) except MySQLClientError as e: - logger.exception( - f"Failed to escalate user privileges for {username}@{hostname}", - exc_info=e, - ) - raise MySQLEscalateUserPrivilegesError(e.message) + logger.exception(f"Failed to escalate user privileges for {username}@{hostname}") + raise MySQLEscalateUserPrivilegesError(e.message) from None def delete_users_with_label(self, label_name: str, label_value: str) -> None: """Delete users with the provided label. 
@@ -562,7 +558,7 @@ def delete_users_with_label(self, label_name: str, label_value: str) -> None: MySQLDeleteUsersWIthLabelError if there is an error deleting users for the label """ get_label_users = ( - "SELECT CONCAT(user.user, '@', user.host) FROM mysql.user AS user " + "SELECT CONCAT(user.user, '@', user.host) FROM mysql.user AS user " # noqa: S608 "JOIN information_schema.user_attributes AS attributes" " ON (user.user = attributes.user AND user.host = attributes.host) " f'WHERE attributes.attribute LIKE \'%"{label_name}": "{label_value}"%\'', @@ -593,10 +589,9 @@ def delete_users_with_label(self, label_name: str, label_value: str) -> None: ) except MySQLClientError as e: logger.exception( - f"Failed to query and delete users for label {label_name}={label_value}", - exc_info=e, + f"Failed to query and delete users for label {label_name}={label_value}" ) - raise MySQLDeleteUsersWithLabelError(e.message) + raise MySQLDeleteUsersWithLabelError(e.message) from None def is_mysqld_running(self) -> bool: """Returns whether server is connectable and mysqld is running.""" @@ -614,7 +609,7 @@ def stop_mysqld(self) -> None: except ChangeError: error_message = f"Failed to stop service {MYSQLD_SERVICE}" logger.exception(error_message) - raise MySQLStopMySQLDError(error_message) + raise MySQLStopMySQLDError(error_message) from None def start_mysqld(self) -> None: """Starts the mysqld process.""" @@ -627,7 +622,7 @@ def start_mysqld(self) -> None: ): error_message = f"Failed to start service {MYSQLD_SERVICE}" logger.exception(error_message) - raise MySQLStartMySQLDError(error_message) + raise MySQLStartMySQLDError(error_message) from None def restart_mysql_exporter(self) -> None: """Restarts the mysqld exporter service in pebble.""" @@ -729,7 +724,7 @@ def _run_mysqlsh_script( else: self.strip_off_passwords_from_exception(e) logger.exception("Failed to execute mysql-shell command") - raise MySQLClientError + raise MySQLClientError from None def _run_mysqlcli_script( self, @@ 
-778,7 +773,7 @@ def _run_mysqlcli_script( else: self.strip_off_passwords_from_exception(e) logger.exception("Failed to execute MySQL cli command") - raise MySQLClientError + raise MySQLClientError from None def write_content_to_file( self, @@ -847,7 +842,7 @@ def check_if_mysqld_process_stopped(self) -> bool: return True except ExecError as e: - raise MySQLClientError(e.stderr or "") + raise MySQLClientError(e.stderr or "") from None def get_available_memory(self) -> int: """Get available memory for the container in bytes.""" diff --git a/src/relations/mysql.py b/src/relations/mysql.py index b920b217d6..bcf5592c2a 100644 --- a/src/relations/mysql.py +++ b/src/relations/mysql.py @@ -99,18 +99,19 @@ def _on_config_changed(self, _) -> None: ): return - if isinstance(self.charm.unit.status, ActiveStatus) and self.model.relations.get( - LEGACY_MYSQL + active_and_related = isinstance( + self.charm.unit.status, ActiveStatus + ) and self.model.relations.get(LEGACY_MYSQL) + + if active_and_related and ( + self.charm.config.mysql_interface_database + != self.charm.app_peer_data[MYSQL_RELATION_DATABASE_KEY] + or self.charm.config.mysql_interface_user + != self.charm.app_peer_data[MYSQL_RELATION_USER_KEY] ): - if ( - self.charm.config.mysql_interface_database - != self.charm.app_peer_data[MYSQL_RELATION_DATABASE_KEY] - or self.charm.config.mysql_interface_user - != self.charm.app_peer_data[MYSQL_RELATION_USER_KEY] - ): - self.charm.app.status = BlockedStatus( - "Remove and re-relate `mysql` relations in order to change config" - ) + self.charm.app.status = BlockedStatus( + "Remove and re-relate `mysql` relations in order to change config" + ) def _on_leader_elected(self, _) -> None: """Handle the leader elected event. 
@@ -195,7 +196,7 @@ def _on_peer_relation_changed(self, event: RelationChangedEvent) -> None: self.model.get_relation(LEGACY_MYSQL).data[self.charm.unit].update(updates) - def _on_mysql_relation_created(self, event: RelationCreatedEvent) -> None: # noqa: C901 + def _on_mysql_relation_created(self, event: RelationCreatedEvent) -> None: """Handle the legacy 'mysql' relation created event. Will set up the database and the scoped application user. The connection @@ -212,7 +213,7 @@ def _on_mysql_relation_created(self, event: RelationCreatedEvent) -> None: # no if ( not self.charm._is_peer_data_set or not self.charm.unit_initialized() - or not self.charm.unit_peer_data.get("member-state") == "online" + or self.charm.unit_peer_data.get("member-state") != "online" ): logger.info("Unit not ready to execute `mysql` relation created. Deferring") event.defer() diff --git a/src/relations/mysql_root.py b/src/relations/mysql_root.py index e2eae8a5a4..5e7206ad4f 100644 --- a/src/relations/mysql_root.py +++ b/src/relations/mysql_root.py @@ -100,7 +100,7 @@ def _on_leader_elected(self, event: LeaderElectedEvent) -> None: if ( not self.charm._is_peer_data_set or not self.charm.unit_initialized() - or not self.charm.unit_peer_data.get("member-state") == "online" + or self.charm.unit_peer_data.get("member-state") != "online" ): logger.info("Unit not ready to execute `mysql` leader elected. 
Deferring") event.defer() @@ -139,18 +139,19 @@ def _on_config_changed(self, _) -> None: ): return - if isinstance(self.charm.unit.status, ActiveStatus) and self.model.relations.get( - LEGACY_MYSQL_ROOT + active_and_related = isinstance( + self.charm.unit.status, ActiveStatus + ) and self.model.relations.get(LEGACY_MYSQL_ROOT) + + if active_and_related and ( + self.charm.config.mysql_root_interface_database + != self.charm.app_peer_data[MYSQL_ROOT_RELATION_DATABASE_KEY] + or self.charm.config.mysql_root_interface_user + != self.charm.app_peer_data[MYSQL_ROOT_RELATION_USER_KEY] ): - if ( - self.charm.config.mysql_root_interface_database - != self.charm.app_peer_data[MYSQL_ROOT_RELATION_DATABASE_KEY] - or self.charm.config.mysql_root_interface_user - != self.charm.app_peer_data[MYSQL_ROOT_RELATION_USER_KEY] - ): - self.charm.app.status = BlockedStatus( - "Remove and re-relate `mysql` relations in order to change config" - ) + self.charm.app.status = BlockedStatus( + "Remove and re-relate `mysql` relations in order to change config" + ) def _on_mysql_root_relation_created(self, event: RelationCreatedEvent) -> None: """Handle the legacy 'mysql-root' relation created event. 
@@ -200,7 +201,8 @@ def _on_mysql_root_relation_created(self, event: RelationCreatedEvent) -> None: try: root_password = self.charm.get_secret("app", ROOT_PASSWORD_KEY) - assert root_password, "Root password not set" + if not root_password: + raise MySQLCreateUserError("MySQL root password not found in peer secrets") self.charm._mysql.create_database(database) self.charm._mysql.create_user(username, password, "mysql-root-legacy-relation") if not self.charm._mysql.does_mysql_user_exist("root", "%"): diff --git a/src/upgrade.py b/src/upgrade.py index b84fefe4a6..410ad1708a 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -62,7 +62,7 @@ def __init__(self, charm: "MySQLOperatorCharm", **kwargs) -> None: super().__init__(charm, **kwargs) self.charm = charm - self.framework.observe(getattr(self.charm.on, "mysql_pebble_ready"), self._on_pebble_ready) + self.framework.observe(self.charm.on.mysql_pebble_ready, self._on_pebble_ready) self.framework.observe(self.charm.on.stop, self._on_stop) self.framework.observe( self.charm.on[self.relation_name].relation_changed, self._on_upgrade_changed @@ -95,12 +95,12 @@ def _count_online_instances(status_dict: dict) -> int: # ensure cluster node addresses are consistent in cluster metadata # https://github.com/canonical/mysql-k8s-operator/issues/327 self.charm._mysql.rescan_cluster() - except MySQLRescanClusterError: + except MySQLRescanClusterError as e: raise ClusterNotReadyError( message=fail_message, cause="Failed to rescan cluster", resolution="Check the cluster status", - ) + ) from e if cluster_status := self.charm._mysql.get_cluster_status(extended=True): if _count_online_instances(cluster_status) < self.charm.app.planned_units(): @@ -122,24 +122,24 @@ def _count_online_instances(status_dict: dict) -> int: try: self._pre_upgrade_prepare() - except MySQLSetClusterPrimaryError: + except MySQLSetClusterPrimaryError as e: raise ClusterNotReadyError( message=fail_message, cause="Failed to set primary", resolution="Check the 
cluster status", - ) - except k8s_helpers.KubernetesClientError: + ) from e + except k8s_helpers.KubernetesClientError as e: raise ClusterNotReadyError( message=fail_message, cause="Failed to patch statefulset", resolution="Check kubernetes access policy", - ) - except MySQLSetVariableError: + ) from e + except MySQLSetVariableError as e: raise ClusterNotReadyError( message=fail_message, cause="Failed to set slow shutdown", resolution="Check the cluster status", - ) + ) from e @override def log_rollback_instructions(self) -> None: @@ -288,7 +288,7 @@ def _set_rolling_update_partition(self, partition: int) -> None: message="Cannot set rolling update partition", cause="Error setting rolling update partition", resolution="Check kubernetes access policy", - ) + ) from None def _check_server_upgradeability(self) -> None: """Check if the server can be upgraded. diff --git a/src/utils.py b/src/utils.py index 4902b42482..47ebe1fa5e 100644 --- a/src/utils.py +++ b/src/utils.py @@ -56,7 +56,7 @@ def any_memory_to_bytes(mem_str) -> int: memory, unit = split_mem(mem_str) unit = unit.upper() if unit not in units: - raise ValueError(f"Invalid memory definition in '{mem_str}'") + raise ValueError(f"Invalid memory definition in '{mem_str}'") from None num = int(memory) return int(num * units[unit]) @@ -67,7 +67,7 @@ def compare_dictionaries(dict1: dict, dict2: dict) -> set: different_keys = set() # exiting keys with different values - for key in dict1.keys(): + for key in dict1: if key in dict2 and dict1[key] != dict2[key]: different_keys.add(key) diff --git a/tests/integration/backups.py b/tests/integration/backups.py index 2f8c0dabbb..e31ef9ebaa 100644 --- a/tests/integration/backups.py +++ b/tests/integration/backups.py @@ -171,9 +171,9 @@ async def pitr_operations( "restore", **{"backup-id": backup_id, "restore-to-time": "bad"} ) await action.wait() - assert ( - action.status == "failed" - ), "restore should fail with bad restore-to-time parameter, but it succeeded" + assert 
action.status == "failed", ( + "restore should fail with bad restore-to-time parameter, but it succeeded" + ) logger.info(f"Restoring backup {backup_id} with year_before restore-to-time parameter") await juju_.run_action( @@ -183,9 +183,9 @@ async def pitr_operations( apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR], timeout=TIMEOUT, ) - assert await check_test_data_existence( - first_mysql_ip, should_not_exist=[td1, td2] - ), "test data should not exist" + assert await check_test_data_existence(first_mysql_ip, should_not_exist=[td1, td2]), ( + "test data should not exist" + ) logger.info(f"Restoring backup {backup_id} with year_after restore-to-time parameter") await juju_.run_action( @@ -195,9 +195,9 @@ async def pitr_operations( apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR], timeout=TIMEOUT, ) - assert await check_test_data_existence( - first_mysql_ip, should_exist=[td1, td2] - ), "both test data should exist" + assert await check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), ( + "both test data should exist" + ) logger.info(f"Restoring backup {backup_id} with actual restore-to-time parameter") await juju_.run_action( @@ -219,9 +219,9 @@ async def pitr_operations( apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR], timeout=TIMEOUT, ) - assert await check_test_data_existence( - first_mysql_ip, should_exist=[td1, td2] - ), "both test data should exist" + assert await check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), ( + "both test data should exist" + ) clean_backups_from_buckets(cloud_configs, cloud_credentials) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 2b27220a2e..2e1afc4ebd 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -120,9 +120,9 @@ async def get_relation_data( data = yaml.safe_load(raw_data) # Filter the data based on the relation name. 
relation_data = [v for v in data[unit_name]["relation-info"] if v["endpoint"] == relation_name] - assert ( - relation_data - ), f"no relation data could be grabbed on relation with endpoint {relation_name}" + assert relation_data, ( + f"no relation data could be grabbed on relation with endpoint {relation_name}" + ) return relation_data @@ -144,11 +144,13 @@ async def get_primary_unit( """ cluster_status = await get_cluster_status(unit) - primary_label = [ - label - for label, member in cluster_status["defaultreplicaset"]["topology"].items() - if member["mode"] == "r/w" - ][0] + primary_label = next( + iter([ + label + for label, member in cluster_status["defaultreplicaset"]["topology"].items() + if member["mode"] == "r/w" + ]) + ) primary_name = "/".join(primary_label.rsplit("-", 1)) for unit in ops_test.model.applications[app_name].units: @@ -207,7 +209,7 @@ async def get_server_config_credentials(unit: Unit) -> Dict: return await juju_.run_action(unit, "get-password", username=SERVER_CONFIG_USERNAME) -async def fetch_credentials(unit: Unit, username: str = None) -> Dict: +async def fetch_credentials(unit: Unit, username: str = "") -> Dict: """Helper to run an action to fetch credentials. Args: @@ -217,13 +219,13 @@ async def fetch_credentials(unit: Unit, username: str = None) -> Dict: Returns: A dictionary with the server config username and password """ - if username is None: + if not username: return await juju_.run_action(unit, "get-password") else: return await juju_.run_action(unit, "get-password", username=username) -async def rotate_credentials(unit: Unit, username: str = None, password: str = None) -> Dict: +async def rotate_credentials(unit: Unit, username: str = "", password: str = "") -> Dict: """Helper to run an action to rotate credentials. 
Args: @@ -234,9 +236,9 @@ async def rotate_credentials(unit: Unit, username: str = None, password: str = N Returns: A dictionary with the action result """ - if username is None: + if not username: return await juju_.run_action(unit, "set-password") - elif password is None: + elif not password: return await juju_.run_action(unit, "set-password", username=username) else: return await juju_.run_action(unit, "set-password", username=username, password=password) @@ -408,9 +410,9 @@ async def get_process_pid( if return_code == 1: return None - assert ( - return_code == 0 - ), f"Failed getting pid, unit={unit_name}, container={container_name}, process={process}" + assert return_code == 0, ( + f"Failed getting pid, unit={unit_name}, container={container_name}, process={process}" + ) stripped_pid = pid.strip() if not stripped_pid: @@ -660,7 +662,7 @@ async def ls_in_unit( unit_name: str, directory: str, container_name: str = CONTAINER_NAME, - exclude_files: list[str] = [], + exclude_files: list[str] = [], # noqa: B006 ) -> list[str]: """Returns the output of ls -la in unit. 
@@ -718,7 +720,7 @@ async def stop_running_log_rotate_dispatcher(ops_test: OpsTest, unit_name: str): ): raise Exception except RetryError: - raise Exception("Failed to stop the log_rotate_dispatcher process") + raise Exception("Failed to stop the log_rotate_dispatcher process") from None async def stop_running_flush_mysql_job( @@ -750,7 +752,7 @@ async def stop_running_flush_mysql_job( if await get_process_pid(ops_test, unit_name, container_name, "logrotate"): raise Exception except RetryError: - raise Exception("Failed to stop the flush_mysql_logs logrotate process.") + raise Exception("Failed to stop the flush_mysql_logs logrotate process.") from None async def dispatch_custom_event_for_logrotate(ops_test: OpsTest, unit_name: str) -> None: diff --git a/tests/integration/high_availability/high_availability_helpers.py b/tests/integration/high_availability/high_availability_helpers.py index d7f45d6ea2..4e6fe238a8 100644 --- a/tests/integration/high_availability/high_availability_helpers.py +++ b/tests/integration/high_availability/high_availability_helpers.py @@ -286,7 +286,7 @@ def deploy_chaos_mesh(namespace: str) -> None: assert output.decode().count("Running") == 4, "Chaos Mesh not ready" except RetryError: - raise Exception("Chaos Mesh pods not found") + raise Exception("Chaos Mesh pods not found") from None def destroy_chaos_mesh(namespace: str) -> None: @@ -337,9 +337,9 @@ async def send_signal_to_pod_container_process( ) response.run_forever(timeout=5) - assert ( - response.returncode == 0 - ), f"Failed to send {signal_code} signal, unit={unit_name}, container={container_name}, process={process}" + assert response.returncode == 0, ( + f"Failed to send {signal_code} signal, unit={unit_name}, container={container_name}, process={process}" + ) async def get_process_stat( @@ -362,9 +362,9 @@ async def get_process_stat( ] return_code, stat, _ = await ops_test.juju(*get_stat_commands) - assert ( - return_code == 0 - ), f"Failed to get STAT, unit_name={unit_name}, 
container_name={container_name}, process={process}" + assert return_code == 0, ( + f"Failed to get STAT, unit_name={unit_name}, container_name={container_name}, process={process}" + ) return stat @@ -498,9 +498,9 @@ async def ensure_all_units_continuous_writes_incrementing( ops_test, unit, credentials ) logger.info(f"{max_written_value=} on unit {unit.name}") - assert ( - max_written_value > last_max_written_value - ), "Continuous writes not incrementing" + assert max_written_value > last_max_written_value, ( + "Continuous writes not incrementing" + ) last_max_written_value = max_written_value @@ -509,7 +509,7 @@ def isolate_instance_from_cluster(ops_test: OpsTest, unit_name: str) -> None: """Apply a NetworkChaos file to use chaos-mesh to simulate a network cut.""" with tempfile.NamedTemporaryFile(dir=os.getenv("HOME")) as temp_file: with open( - "tests/integration/high_availability/manifests/chaos_network_loss.yml", "r" + "tests/integration/high_availability/manifests/chaos_network_loss.yml" ) as chaos_network_loss_file: template = string.Template(chaos_network_loss_file.read()) chaos_network_loss = template.substitute( @@ -573,9 +573,9 @@ async def ensure_process_not_running( get_pid_commands = ["ssh", "--container", container_name, unit_name, "pgrep", "-x", process] return_code, pid, _ = await ops_test.juju(*get_pid_commands) - assert ( - return_code != 0 - ), f"Process {process} is still running with pid {pid} on unit {unit_name}, container {container_name}" + assert return_code != 0, ( + f"Process {process} is still running with pid {pid} on unit {unit_name}, container {container_name}" + ) def get_sts_partition(ops_test: OpsTest, app_name: str) -> int: diff --git a/tests/integration/high_availability/test_async_replication.py b/tests/integration/high_availability/test_async_replication.py index 03e90d49ce..dbabb3dec6 100644 --- a/tests/integration/high_availability/test_async_replication.py +++ b/tests/integration/high_availability/test_async_replication.py 
@@ -250,9 +250,9 @@ async def test_standby_promotion( assert results[0] > 1, "No data was written to the database" cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True) - assert ( - cluster_set_status["clusters"]["cuzco"]["clusterrole"] == "primary" - ), "standby not promoted to primary" + assert cluster_set_status["clusters"]["cuzco"]["clusterrole"] == "primary", ( + "standby not promoted to primary" + ) @markers.juju3 @@ -278,12 +278,12 @@ async def test_failover(ops_test: OpsTest, first_model: Model, second_model: Mod ) cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True) - assert ( - cluster_set_status["clusters"]["lima"]["clusterrole"] == "primary" - ), "standby not promoted to primary" - assert ( - cluster_set_status["clusters"]["cuzco"]["globalstatus"] == "invalidated" - ), "old primary not invalidated" + assert cluster_set_status["clusters"]["lima"]["clusterrole"] == "primary", ( + "standby not promoted to primary" + ) + assert cluster_set_status["clusters"]["cuzco"]["globalstatus"] == "invalidated", ( + "old primary not invalidated" + ) # restore mysqld process for unit in second_model_units: diff --git a/tests/integration/high_availability/test_k8s_endpoints.py b/tests/integration/high_availability/test_k8s_endpoints.py index a98026c5b1..9e8a1338fc 100644 --- a/tests/integration/high_availability/test_k8s_endpoints.py +++ b/tests/integration/high_availability/test_k8s_endpoints.py @@ -85,9 +85,9 @@ async def test_labeling_of_k8s_endpoints(ops_test: OpsTest, charm): ) for primary in cluster_one_primary_addresses: - assert ( - primary in cluster_one_ips - ), f"{primary} (not belonging to cluster 1) should not be in cluster one addresses" + assert primary in cluster_one_ips, ( + f"{primary} (not belonging to cluster 1) should not be in cluster one addresses" + ) assert set(cluster_one_primary_addresses + cluster_one_replica_addresses) == set( cluster_one_ips @@ -106,9 +106,9 @@ async def 
test_labeling_of_k8s_endpoints(ops_test: OpsTest, charm): ) for primary in cluster_two_primary_addresses: - assert ( - primary in cluster_two_ips - ), f"{primary} (not belonging to cluster w) should not be in cluster two addresses" + assert primary in cluster_two_ips, ( + f"{primary} (not belonging to cluster w) should not be in cluster two addresses" + ) assert set(cluster_two_primary_addresses + cluster_two_replica_addresses) == set( cluster_two_ips diff --git a/tests/integration/high_availability/test_log_rotation.py b/tests/integration/high_availability/test_log_rotation.py index 2a1a43d131..02362a5cac 100644 --- a/tests/integration/high_availability/test_log_rotation.py +++ b/tests/integration/high_availability/test_log_rotation.py @@ -86,9 +86,9 @@ async def test_log_rotation( for file in log_files: # audit.log can be rotated and new file not created until access to db - assert ( - file in ls_output or file == "audit.log" - ), f"❌ files other than log files exist {ls_output}" + assert file in ls_output or file == "audit.log", ( + f"❌ files other than log files exist {ls_output}" + ) logger.info("Dispatching custom event to rotate logs") await dispatch_custom_event_for_logrotate(ops_test, unit.name) diff --git a/tests/integration/high_availability/test_node_drain.py b/tests/integration/high_availability/test_node_drain.py index 99ca1e58d8..12ae43f4b4 100644 --- a/tests/integration/high_availability/test_node_drain.py +++ b/tests/integration/high_availability/test_node_drain.py @@ -36,9 +36,9 @@ async def test_pod_eviction_and_pvc_deletion( logger.info("Waiting until 3 mysql instances are online") # ensure all units in the cluster are online - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application is not fully online" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application is not fully online" + ) logger.info("Ensuring all units have continuous writes incrementing") await 
ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) @@ -65,9 +65,9 @@ async def test_pod_eviction_and_pvc_deletion( ) logger.info("Waiting until 3 mysql instances are online") - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application is not fully online after primary pod eviction" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application is not fully online after primary pod eviction" + ) logger.info("Ensuring all units have continuous writes incrementing") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) diff --git a/tests/integration/high_availability/test_replication_reelection.py b/tests/integration/high_availability/test_replication_reelection.py index 53d1940715..9e667fe027 100644 --- a/tests/integration/high_availability/test_replication_reelection.py +++ b/tests/integration/high_availability/test_replication_reelection.py @@ -61,9 +61,9 @@ async def test_kill_primary_check_reelection( assert primary_name != new_primary_name # wait (and retry) until the killed pod is back online in the mysql cluster - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "Old primary has not come back online after being killed" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "Old primary has not come back online after being killed" + ) await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) diff --git a/tests/integration/high_availability/test_replication_scaling.py b/tests/integration/high_availability/test_replication_scaling.py index e25f69b9b8..40079beb18 100644 --- a/tests/integration/high_availability/test_replication_scaling.py +++ b/tests/integration/high_availability/test_replication_scaling.py @@ -54,9 +54,9 @@ async def test_scaling_without_data_loss( # scale up the mysql application await scale_application(ops_test, mysql_application_name, 4) - assert await 
ensure_n_online_mysql_members( - ops_test, 4 - ), "The cluster is not fully online after scaling up" + assert await ensure_n_online_mysql_members(ops_test, 4), ( + "The cluster is not fully online after scaling up" + ) # ensure value inserted before scale exists in all units for attempt in Retrying(stop=stop_after_delay(10), wait=wait_fixed(2)): @@ -84,9 +84,9 @@ async def test_scaling_without_data_loss( # scale down the mysql application await scale_application(ops_test, mysql_application_name, 3) - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The cluster is not fully online after scaling down" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The cluster is not fully online after scaling down" + ) # ensure data written before scale down is persisted for unit in ops_test.model.applications[mysql_application_name].units: diff --git a/tests/integration/high_availability/test_self_healing_network_cut.py b/tests/integration/high_availability/test_self_healing_network_cut.py index ae3f7cc0d4..0c3769177a 100644 --- a/tests/integration/high_availability/test_self_healing_network_cut.py +++ b/tests/integration/high_availability/test_self_healing_network_cut.py @@ -33,9 +33,9 @@ async def test_network_cut_affecting_an_instance( assert mysql_application_name, "mysql application name is not set" logger.info("Ensuring that there are 3 online mysql members") - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application does not have three online nodes" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application does not have three online nodes" + ) logger.info("Ensuring that all instances have incrementing continuous writes") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) @@ -58,11 +58,13 @@ async def test_network_cut_affecting_an_instance( cluster_status = await get_cluster_status(remaining_units[0]) - isolated_primary_status, 
isolated_primary_memberrole = [ - (member["status"], member["memberrole"]) - for label, member in cluster_status["defaultreplicaset"]["topology"].items() - if label == primary.name.replace("/", "-") - ][0] + isolated_primary_status, isolated_primary_memberrole = next( + iter([ + (member["status"], member["memberrole"]) + for label, member in cluster_status["defaultreplicaset"]["topology"].items() + if label == primary.name.replace("/", "-") + ]) + ) assert isolated_primary_status == "(missing)" assert isolated_primary_memberrole == "secondary" @@ -93,18 +95,20 @@ async def test_network_cut_affecting_an_instance( new_cluster_status = await get_cluster_status(mysql_units[0]) logger.info("Ensure isolated instance is now secondary") - isolated_primary_status, isolated_primary_memberrole = [ - (member["status"], member["memberrole"]) - for label, member in new_cluster_status["defaultreplicaset"]["topology"].items() - if label == primary.name.replace("/", "-") - ][0] + isolated_primary_status, isolated_primary_memberrole = next( + iter([ + (member["status"], member["memberrole"]) + for label, member in new_cluster_status["defaultreplicaset"]["topology"].items() + if label == primary.name.replace("/", "-") + ]) + ) assert isolated_primary_status == "online" assert isolated_primary_memberrole == "secondary" logger.info("Ensure there are 3 online mysql members") - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application does not have three online nodes" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application does not have three online nodes" + ) logger.info("Ensure all units have incrementing continuous writes") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) diff --git a/tests/integration/high_availability/test_self_healing_process_frozen.py b/tests/integration/high_availability/test_self_healing_process_frozen.py index 5e137693fb..4dbce64067 100644 --- 
a/tests/integration/high_availability/test_self_healing_process_frozen.py +++ b/tests/integration/high_availability/test_self_healing_process_frozen.py @@ -33,9 +33,9 @@ async def test_freeze_db_process( assert mysql_application_name, "mysql application name is not set" # ensure all units in the cluster are online - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application is not fully online" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application is not fully online" + ) logger.info("Ensuring that all units continuous writes incrementing") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) @@ -76,9 +76,9 @@ async def test_freeze_db_process( # retring as it may take time for the cluster to recognize that the primary process is stopped for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)): with attempt: - assert await ensure_n_online_mysql_members( - ops_test, 2, remaining_online_units - ), "The deployed mysql application does not have two online nodes" + assert await ensure_n_online_mysql_members(ops_test, 2, remaining_online_units), ( + "The deployed mysql application does not have two online nodes" + ) new_primary = await get_primary_unit( ops_test, remaining_online_units[0], mysql_application_name @@ -112,9 +112,9 @@ async def test_freeze_db_process( mysql_process_stat_after_sigcont = await get_process_stat( ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME ) - assert ( - "T" not in mysql_process_stat_after_sigcont - ), "mysql process is not started after sigcont" + assert "T" not in mysql_process_stat_after_sigcont, ( + "mysql process is not started after sigcont" + ) assert ( "R" in mysql_process_stat_after_sigcont or "S" in mysql_process_stat_after_sigcont @@ -124,9 +124,9 @@ async def test_freeze_db_process( new_mysql_pid = await get_process_pid( ops_test, primary.name, MYSQL_CONTAINER_NAME, 
MYSQLD_PROCESS_NAME ) - assert ( - new_mysql_pid == mysql_pid - ), "mysql process id is not the same as it was before process was stopped" + assert new_mysql_pid == mysql_pid, ( + "mysql process id is not the same as it was before process was stopped" + ) # wait for possible recovery of the old primary async with ops_test.fast_forward("60s"): @@ -138,9 +138,9 @@ async def test_freeze_db_process( ) logger.info("Ensuring that there are 3 online mysql members") - assert await ensure_n_online_mysql_members( - ops_test, 3, remaining_online_units - ), "The deployed mysql application does not have three online nodes" + assert await ensure_n_online_mysql_members(ops_test, 3, remaining_online_units), ( + "The deployed mysql application does not have three online nodes" + ) logger.info("Ensure all units continuous writes incrementing") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) diff --git a/tests/integration/high_availability/test_self_healing_process_killed.py b/tests/integration/high_availability/test_self_healing_process_killed.py index 8de9720db8..c917b8a37d 100644 --- a/tests/integration/high_availability/test_self_healing_process_killed.py +++ b/tests/integration/high_availability/test_self_healing_process_killed.py @@ -34,9 +34,9 @@ async def test_kill_db_process( logger.info("Waiting until 3 mysql instances are online") # ensure all units in the cluster are online - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application is not fully online" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application is not fully online" + ) logger.info("Ensuring all units have continuous writes incrementing") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) @@ -61,22 +61,22 @@ async def test_kill_db_process( time.sleep(10) logger.info("Waiting until 3 mysql instances are online") - assert await ensure_n_online_mysql_members( 
- ops_test, 3 - ), "The mysql application is not fully online after sending SIGKILL to primary" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The mysql application is not fully online after sending SIGKILL to primary" + ) # ensure that the mysqld process got restarted and has a new process id new_mysql_pid = await get_process_pid( ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME ) - assert ( - mysql_pid != new_mysql_pid - ), "The mysql process id is the same after sending it a SIGKILL" + assert mysql_pid != new_mysql_pid, ( + "The mysql process id is the same after sending it a SIGKILL" + ) new_primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name) - assert ( - primary.name != new_primary.name - ), "The mysql primary has not been reelected after sending a SIGKILL" + assert primary.name != new_primary.name, ( + "The mysql primary has not been reelected after sending a SIGKILL" + ) logger.info("Ensuring all units have continuous writes incrementing") # ensure continuous writes still incrementing for all units diff --git a/tests/integration/high_availability/test_self_healing_stop_all.py b/tests/integration/high_availability/test_self_healing_stop_all.py index e87817e3e2..acca006b3b 100644 --- a/tests/integration/high_availability/test_self_healing_stop_all.py +++ b/tests/integration/high_availability/test_self_healing_stop_all.py @@ -34,9 +34,9 @@ async def test_graceful_full_cluster_crash_test( assert mysql_application_name, "mysql application name is not set" logger.info("Ensure there are 3 online mysql members") - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application does not have three online nodes" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application does not have three online nodes" + ) logger.info("Ensure that all units have incrementing continuous writes") await ensure_all_units_continuous_writes_incrementing(ops_test, 
credentials=credentials) diff --git a/tests/integration/high_availability/test_self_healing_stop_primary.py b/tests/integration/high_availability/test_self_healing_stop_primary.py index cc827cb563..ce71014087 100644 --- a/tests/integration/high_availability/test_self_healing_stop_primary.py +++ b/tests/integration/high_availability/test_self_healing_stop_primary.py @@ -33,9 +33,9 @@ async def test_graceful_crash_of_primary( assert mysql_application_name, "mysql application name is not set" logger.info("Ensuring that there are 3 online mysql members") - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application does not have three online nodes" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application does not have three online nodes" + ) logger.info("Ensuring that all units have incrementing continuous writes") await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials) @@ -59,9 +59,9 @@ async def test_graceful_crash_of_primary( new_mysql_pid = await get_process_pid( ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME ) - assert ( - new_mysql_pid == mysql_pid - ), "mysql process id is not the same as it was before process was stopped" + assert new_mysql_pid == mysql_pid, ( + "mysql process id is not the same as it was before process was stopped" + ) remaining_online_units = [ unit @@ -73,9 +73,9 @@ async def test_graceful_crash_of_primary( # retrying as it may take time for the cluster to recognize that the primary process is stopped for attempt in Retrying(stop=stop_after_delay(2 * 60), wait=wait_fixed(10)): with attempt: - assert await ensure_n_online_mysql_members( - ops_test, 3 - ), "The deployed mysql application does not have three online nodes" + assert await ensure_n_online_mysql_members(ops_test, 3), ( + "The deployed mysql application does not have three online nodes" + ) new_primary = await get_primary_unit( ops_test, 
remaining_online_units[0], mysql_application_name diff --git a/tests/integration/high_availability/test_upgrade.py b/tests/integration/high_availability/test_upgrade.py index 15d6959f81..4b942444c0 100644 --- a/tests/integration/high_availability/test_upgrade.py +++ b/tests/integration/high_availability/test_upgrade.py @@ -6,6 +6,7 @@ import pathlib import shutil import zipfile +from contextlib import suppress from pathlib import Path from time import sleep from typing import Union @@ -118,14 +119,13 @@ async def test_upgrade_from_edge(ops_test: OpsTest, charm, continuous_writes, cr logger.info("Resume upgrade") while get_sts_partition(ops_test, MYSQL_APP_NAME) == 2: # resume action sometime fails in CI, no clear reason - try: - await juju_.run_action(leader_unit, "resume-upgrade") - except AssertionError: + + with suppress(AssertionError): # ignore action return error as it is expected when # the leader unit is the next one to be upgraded # due it being immediately rolled when the partition # is patched in the statefulset - pass + await juju_.run_action(leader_unit, "resume-upgrade") logger.info("Wait for upgrade to complete") await ops_test.model.block_until( @@ -215,7 +215,7 @@ async def inject_dependency_fault( ) -> None: """Inject a dependency fault into the mysql charm.""" # Open dependency.json and load current charm version - with open("src/dependency.json", "r") as dependency_file: + with open("src/dependency.json") as dependency_file: current_charm_version = json.load(dependency_file)["charm"]["version"] # query running dependency to overwrite with incompatible version diff --git a/tests/integration/high_availability/test_upgrade_rollback_incompat.py b/tests/integration/high_availability/test_upgrade_rollback_incompat.py index f01331ffc7..3de305a0af 100644 --- a/tests/integration/high_availability/test_upgrade_rollback_incompat.py +++ b/tests/integration/high_availability/test_upgrade_rollback_incompat.py @@ -167,12 +167,12 @@ async def 
test_rollback(ops_test, charm) -> None: ) -class InjectFailure(object): +class InjectFailure: def __init__(self, path: str, original_str: str, replace_str: str): self.path = path self.original_str = original_str self.replace_str = replace_str - with open(path, "r") as file: + with open(path) as file: self.original_content = file.read() def __enter__(self): @@ -204,7 +204,7 @@ async def charm_local_build(ops_test: OpsTest, charm, refresh: bool = False): charm = pathlib.Path(shutil.copy(charm, f"local-{pathlib.Path(charm).stem}.charm")) for path in update_files: - with open(path, "r") as f: + with open(path) as f: content = f.read() with ZipFile(charm, mode="a") as charm_zip: diff --git a/tests/integration/test_backup_aws.py b/tests/integration/test_backup_aws.py index 0e6a9d213d..15c0fb0280 100644 --- a/tests/integration/test_backup_aws.py +++ b/tests/integration/test_backup_aws.py @@ -170,7 +170,7 @@ async def test_backup( output = results["backups"] new_backup_ids = [line.split("|")[0].strip() for line in output.split("\n")[2:]] - assert sorted(new_backup_ids) == sorted(backup_ids + [backup_id]) + assert sorted(new_backup_ids) == sorted([*backup_ids, backup_id]) # insert data into cluster after backup logger.info("Inserting value after backup") @@ -260,7 +260,7 @@ async def test_restore_on_same_cluster( logger.info("Ensuring inserted values before backup and after restore exist on all units") for unit in ops_test.model.applications[mysql_application_name].units: await ops_test.model.block_until( - lambda: unit.workload_status == "active", + lambda: unit.workload_status == "active", # noqa: B023 timeout=TIMEOUT, ) @@ -394,7 +394,7 @@ async def test_restore_on_new_cluster( logger.info("Ensuring inserted values before backup and after restore exist on all units") for unit in ops_test.model.applications[new_mysql_application_name].units: await ops_test.model.block_until( - lambda: unit.workload_status == "active", + lambda: unit.workload_status == "active", # noqa: 
B023 timeout=TIMEOUT, ) diff --git a/tests/integration/test_backup_ceph.py b/tests/integration/test_backup_ceph.py index a528977d0a..85a8a2d382 100644 --- a/tests/integration/test_backup_ceph.py +++ b/tests/integration/test_backup_ceph.py @@ -226,7 +226,7 @@ async def test_backup( output = results["backups"] new_backup_ids = [line.split("|")[0].strip() for line in output.split("\n")[2:]] - assert sorted(new_backup_ids) == sorted(backup_ids + [backup_id]) + assert sorted(new_backup_ids) == sorted([*backup_ids, backup_id]) # insert data into cluster after backup logger.info("Inserting value after backup") @@ -316,7 +316,7 @@ async def test_restore_on_same_cluster( logger.info("Ensuring inserted values before backup and after restore exist on all units") for unit in ops_test.model.applications[mysql_application_name].units: await ops_test.model.block_until( - lambda: unit.workload_status == "active", + lambda: unit.workload_status == "active", # noqa: B023 timeout=TIMEOUT, ) @@ -450,7 +450,7 @@ async def test_restore_on_new_cluster( logger.info("Ensuring inserted values before backup and after restore exist on all units") for unit in ops_test.model.applications[new_mysql_application_name].units: await ops_test.model.block_until( - lambda: unit.workload_status == "active", + lambda: unit.workload_status == "active", # noqa: B023 timeout=TIMEOUT, ) diff --git a/tests/integration/test_backup_gcp.py b/tests/integration/test_backup_gcp.py index dfd6e8f469..063047cd01 100644 --- a/tests/integration/test_backup_gcp.py +++ b/tests/integration/test_backup_gcp.py @@ -170,7 +170,7 @@ async def test_backup( output = results["backups"] new_backup_ids = [line.split("|")[0].strip() for line in output.split("\n")[2:]] - assert sorted(new_backup_ids) == sorted(backup_ids + [backup_id]) + assert sorted(new_backup_ids) == sorted([*backup_ids, backup_id]) # insert data into cluster after backup logger.info("Inserting value after backup") @@ -260,7 +260,7 @@ async def 
test_restore_on_same_cluster( logger.info("Ensuring inserted values before backup and after restore exist on all units") for unit in ops_test.model.applications[mysql_application_name].units: await ops_test.model.block_until( - lambda: unit.workload_status == "active", + lambda: unit.workload_status == "active", # noqa: B023 timeout=TIMEOUT, ) @@ -394,7 +394,7 @@ async def test_restore_on_new_cluster( logger.info("Ensuring inserted values before backup and after restore exist on all units") for unit in ops_test.model.applications[new_mysql_application_name].units: await ops_test.model.block_until( - lambda: unit.workload_status == "active", + lambda: unit.workload_status == "active", # noqa: B023 timeout=TIMEOUT, ) diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index b9b1e3563e..1f954b15f4 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -320,9 +320,9 @@ async def test_exporter_endpoints(ops_test: OpsTest) -> None: resp = http.request("GET", mysql_exporter_url) assert resp.status == 200, "Can't get metrics from mysql_exporter" - assert "mysql_exporter_last_scrape_error 0" in resp.data.decode( - "utf8" - ), "Scrape error in mysql_exporter" + assert "mysql_exporter_last_scrape_error 0" in resp.data.decode("utf8"), ( + "Scrape error in mysql_exporter" + ) @pytest.mark.abort_on_fail diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py index 2ea2a951b6..7c929219bc 100644 --- a/tests/integration/test_tls.py +++ b/tests/integration/test_tls.py @@ -32,17 +32,11 @@ if juju_.has_secrets: tls_app_name = "self-signed-certificates" - if architecture.architecture == "arm64": - tls_channel = "latest/edge" - else: - tls_channel = "latest/stable" + tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" tls_config = {"ca-common-name": "Test CA"} else: tls_app_name = "tls-certificates-operator" - if architecture.architecture == "arm64": - tls_channel = 
"legacy/edge" - else: - tls_channel = "legacy/stable" + tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} @@ -105,13 +99,13 @@ async def test_connection_before_tls(ops_test: OpsTest) -> None: unit_ip = await get_unit_address(ops_test, unit.name) config["host"] = unit_ip - assert is_connection_possible( - config, **{"ssl_disabled": False} - ), f"❌ Encrypted connection not possible to unit {unit.name} with disabled TLS" + assert is_connection_possible(config, **{"ssl_disabled": False}), ( + f"❌ Encrypted connection not possible to unit {unit.name} with disabled TLS" + ) - assert is_connection_possible( - config, **{"ssl_disabled": True} - ), f"❌ Unencrypted connection not possible to unit {unit.name} with disabled TLS" + assert is_connection_possible(config, **{"ssl_disabled": True}), ( + f"❌ Unencrypted connection not possible to unit {unit.name} with disabled TLS" + ) @pytest.mark.abort_on_fail @@ -140,13 +134,13 @@ async def test_enable_tls(ops_test: OpsTest) -> None: for unit in all_units: unit_ip = await get_unit_address(ops_test, unit.name) config["host"] = unit_ip - assert is_connection_possible( - config, **{"ssl_disabled": False} - ), f"❌ Encrypted connection not possible to unit {unit.name} with enabled TLS" + assert is_connection_possible(config, **{"ssl_disabled": False}), ( + f"❌ Encrypted connection not possible to unit {unit.name} with enabled TLS" + ) - assert not is_connection_possible( - config, **{"ssl_disabled": True} - ), f"❌ Unencrypted connection possible to unit {unit.name} with enabled TLS" + assert not is_connection_possible(config, **{"ssl_disabled": True}), ( + f"❌ Unencrypted connection possible to unit {unit.name} with enabled TLS" + ) # test for ca presence in a given unit logger.info("Assert TLS file exists") @@ -184,22 +178,22 @@ async def test_rotate_tls_key(ops_test: OpsTest) -> None: ops_test, unit.name, 
f"/var/lib/mysql/{TLS_SSL_CERT_FILE}" ) - assert ( - new_cert_md5 != original_tls[unit.name]["cert"] - ), f"cert for {unit.name} was not updated." + assert new_cert_md5 != original_tls[unit.name]["cert"], ( + f"cert for {unit.name} was not updated." + ) # Asserting only encrypted connection should be possible logger.info("Asserting connections after relation") for unit in all_units: unit_ip = await get_unit_address(ops_test, unit.name) config["host"] = unit_ip - assert is_connection_possible( - config, **{"ssl_disabled": False} - ), f"❌ Encrypted connection not possible to unit {unit.name} with enabled TLS" + assert is_connection_possible(config, **{"ssl_disabled": False}), ( + f"❌ Encrypted connection not possible to unit {unit.name} with enabled TLS" + ) - assert not is_connection_possible( - config, **{"ssl_disabled": True} - ), f"❌ Unencrypted connection possible to unit {unit.name} with enabled TLS" + assert not is_connection_possible(config, **{"ssl_disabled": True}), ( + f"❌ Unencrypted connection possible to unit {unit.name} with enabled TLS" + ) @pytest.mark.abort_on_fail @@ -220,10 +214,10 @@ async def test_disable_tls(ops_test: OpsTest) -> None: for unit in all_units: unit_ip = await get_unit_address(ops_test, unit.name) config["host"] = unit_ip - assert is_connection_possible( - config, **{"ssl_disabled": False} - ), f"❌ Encrypted connection not possible to unit {unit.name} after relation removal" + assert is_connection_possible(config, **{"ssl_disabled": False}), ( + f"❌ Encrypted connection not possible to unit {unit.name} after relation removal" + ) - assert is_connection_possible( - config, **{"ssl_disabled": True} - ), f"❌ Unencrypted connection not possible to unit {unit.name} after relation removal" + assert is_connection_possible(config, **{"ssl_disabled": True}), ( + f"❌ Unencrypted connection not possible to unit {unit.name} after relation removal" + ) diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 
27ee26be73..2f7bb3673d 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -88,7 +88,7 @@ def test_database_requested( self.database_relation_id, self.harness.charm.app ) database_relation = self.charm.model.get_relation(DB_RELATION_NAME) - app_unit = list(database_relation.units)[0] + app_unit = next(iter(database_relation.units)) self.assertEqual(database_relation_databag, {}) self.assertEqual(database_relation.data.get(app_unit), {}) From 484af81fcbda38a1eb083c13c9f7e1980ef8883f Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Wed, 20 Aug 2025 14:37:04 -0300 Subject: [PATCH 2/6] missing one --- src/charm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index c9136ba423..4d5313b110 100755 --- a/src/charm.py +++ b/src/charm.py @@ -442,7 +442,8 @@ def join_unit_to_cluster(self) -> None: # add random delay to mitigate collisions when multiple units are joining # due the difference between the time we test for locks and acquire them - sleep(random.uniform(0, 1.5)) + # Not used for cryptographic purposes + sleep(random.uniform(0, 1.5)) # noqa: S311 if self._mysql.are_locks_acquired(from_instance=lock_instance or cluster_primary): self.unit.status = WaitingStatus("waiting to join the cluster") From 4f6fc086eeca01344318c46689bcc0ef0812a711 Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Wed, 20 Aug 2025 14:57:42 -0300 Subject: [PATCH 3/6] add placeholder function for followup PR --- src/charm.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/charm.py b/src/charm.py index 4d5313b110..87802a25bf 100755 --- a/src/charm.py +++ b/src/charm.py @@ -318,6 +318,10 @@ def text_logs(self) -> list: return text_logs + def update_endpoints(self) -> None: + """Temp placeholder.""" + pass + def unit_initialized(self, raise_exceptions: bool = False) -> bool: """Return whether a unit is started.
From a6210c96a7f9f31674783b06be287591612570d1 Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Wed, 20 Aug 2025 15:06:17 -0300 Subject: [PATCH 4/6] locally build fqdn for units --- src/charm.py | 20 ++------------------ src/k8s_helpers.py | 5 +++++ 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/src/charm.py b/src/charm.py index 87802a25bf..81070374c4 100755 --- a/src/charm.py +++ b/src/charm.py @@ -92,7 +92,7 @@ TRACING_PROTOCOL, TRACING_RELATION_NAME, ) -from k8s_helpers import KubernetesHelpers +from k8s_helpers import KubernetesHelpers, k8s_domain from log_rotate_manager import LogRotateManager from mysql_k8s_helpers import MySQL, MySQLInitialiseMySQLDError from relations.mysql import MySQLRelation @@ -354,23 +354,7 @@ def get_unit_address(self, unit: Unit, relation_name: str = PEER) -> str: Translate juju unit name to resolvable hostname. """ unit_hostname = self.get_unit_hostname(unit.name) - unit_dns_domain = getfqdn(self.get_unit_hostname(unit.name)) - - # When fully propagated, DNS domain name should contain unit hostname. - # For example: - # Hostname: mysql-k8s-0.mysql-k8s-endpoints - # Fully propagated: mysql-k8s-0.mysql-k8s-endpoints.dev.svc.cluster.local - # Not propagated yet: 10-1-142-191.mysql-k8s.dev.svc.cluster.local - if unit_hostname not in unit_dns_domain: - logger.warning( - "get_unit_address: unit DNS domain name is not fully propagated yet, trying again" - ) - raise RuntimeError("unit DNS domain name is not fully propagated yet") - if unit_dns_domain == unit_hostname: - logger.warning("Can't get fully qualified domain name for unit. 
IS DNS not ready?") - raise RuntimeError("Can't get unit fqdn") - - return dotappend(unit_dns_domain) + return dotappend(f"{unit_hostname}.{self.model.name}.svc.{k8s_domain()}") def is_unit_busy(self) -> bool: """Returns whether the unit is busy.""" diff --git a/src/k8s_helpers.py b/src/k8s_helpers.py index edf8642475..d03b85bf6a 100644 --- a/src/k8s_helpers.py +++ b/src/k8s_helpers.py @@ -28,6 +28,11 @@ from charm import MySQLOperatorCharm +def k8s_domain() -> str: + """Return the Kubernetes domain.""" + return socket.getfqdn("kubernetes.default").split(".svc.")[-1] + + class KubernetesClientError(Exception): """Exception raised when client can't execute.""" From 37846f606eb0f8f2e036ea00004b51e7d05385aa Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Wed, 20 Aug 2025 17:27:02 -0300 Subject: [PATCH 5/6] update parameters --- tests/integration/high_availability/test_async_replication.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/high_availability/test_async_replication.py b/tests/integration/high_availability/test_async_replication.py index dbabb3dec6..92067371e2 100644 --- a/tests/integration/high_availability/test_async_replication.py +++ b/tests/integration/high_availability/test_async_replication.py @@ -242,6 +242,7 @@ async def test_standby_promotion( await juju_.run_action( leader_unit, "promote-to-primary", + **{"scope": "cluster"}, ) results = await get_max_written_value(first_model, second_model) @@ -274,7 +275,7 @@ async def test_failover(ops_test: OpsTest, first_model: Model, second_model: Mod await juju_.run_action( leader_unit, "promote-to-primary", - **{"--wait": "5m", "force": True}, + **{"--wait": "5m", "scope": "cluster", "force": True}, ) cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True) From 16b5dcab97899f33f737640bd8242cbe540f62fb Mon Sep 17 00:00:00 2001 From: Paulo Machado Date: Tue, 26 Aug 2025 15:36:12 -0300 Subject: [PATCH 6/6] remove placeholder --- src/charm.py | 4 
---- 1 file changed, 4 deletions(-) diff --git a/src/charm.py b/src/charm.py index a1ffe80094..d58057de80 100755 --- a/src/charm.py +++ b/src/charm.py @@ -319,10 +319,6 @@ def text_logs(self) -> list: return text_logs - def update_endpoints(self) -> None: - """Temp placeholder.""" - pass - def unit_initialized(self, raise_exceptions: bool = False) -> bool: """Return whether a unit is started.