DPE-3879 update endpoint on upgrade #426
base: main
Changes from 4 commits
Commits: 144ce84, a56bca1, b6c1233, a513b99, b60ee97
```diff
@@ -20,7 +20,7 @@
 )
 from ops.charm import RelationBrokenEvent, RelationDepartedEvent, RelationJoinedEvent
 from ops.framework import Object
-from ops.model import BlockedStatus
+from ops.model import BlockedStatus, Relation, Unit

 from constants import DB_RELATION_NAME, PASSWORD_LENGTH, PEER
 from utils import generate_random_password
```
```diff
@@ -55,29 +55,30 @@ def __init__(self, charm: "MySQLOperatorCharm"):
         self.framework.observe(self.charm.on.leader_elected, self._update_endpoints_all_relations)
         self.framework.observe(self.charm.on.update_status, self._update_endpoints_all_relations)

+    @property
+    def active_relations(self) -> list[Relation]:
+        """Return the active relations."""
+        relation_data = self.database.fetch_relation_data()
+        return [
+            rel
+            for rel in self.model.relations[DB_RELATION_NAME]
+            if rel.id in relation_data  # rel.id in relation data after on_database_requested
+        ]
+
     def _update_endpoints_all_relations(self, _):
         """Update endpoints for all relations."""
         if not self.charm.unit.is_leader():
             return
-        # get all relations involving the database relation
-        relations = list(self.model.relations[DB_RELATION_NAME])
-        # check if there are relations in place
-        if len(relations) == 0:
-            return

         if not self.charm.cluster_initialized or not self.charm.unit_peer_data.get(
             "unit-initialized"
         ):
             logger.debug("Waiting cluster/unit to be initialized")
             return

-        relation_data = self.database.fetch_relation_data()
         # for all relations update the read-only-endpoints
-        for relation in relations:
-            # check if the on_database_requested has been executed
-            if relation.id not in relation_data:
-                logger.debug("On database requested not happened yet! Nothing to do in this case")
-                continue
+        for relation in self.active_relations:
             self._update_endpoints(relation.id, relation.app.name)

     def _on_relation_departed(self, event: RelationDepartedEvent):
```
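For context, a minimal sketch (with hypothetical data) of why the `rel.id in relation_data` filter in `active_relations` replaces the removed per-relation check: assuming `fetch_relation_data()` returns a mapping keyed by relation ID, as in the data_platform_libs data_interfaces library, a relation only gains an entry once `on_database_requested` has run.

```python
# Sketch with hypothetical data; mirrors the `active_relations` filter above.
# Assumes fetch_relation_data() returns a dict keyed by relation ID.
relation_data = {5: {"database": "app_db"}}  # relation 5 completed on_database_requested
relation_ids = [5, 7]  # relation 7 is joined but has not requested a database yet

active = [rel_id for rel_id in relation_ids if rel_id in relation_data]
assert active == [5]  # relation 7 is skipped, replacing the old `continue` branch
```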
```diff
@@ -207,6 +208,7 @@ def _on_database_requested(self, event: DatabaseRequestedEvent):
         # get base relation data
         relation_id = event.relation.id
         db_name = event.database
+        assert db_name, "Database name must be provided"
         extra_user_roles = []
         if event.extra_user_roles:
             extra_user_roles = event.extra_user_roles.split(",")
```
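The new assertion guards against `event.database` being unset, and also narrows the optional type for the code that follows. A minimal standalone sketch of the same pattern (hypothetical function, not from the charm):

```python
from typing import Optional

def normalize_db_name(database: Optional[str]) -> str:
    # The assert documents the invariant and narrows Optional[str] to str,
    # so later string operations (and static type checkers) are safe.
    assert database, "Database name must be provided"
    return database.strip().lower()
```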
```diff
@@ -272,8 +274,8 @@ def _on_database_broken(self, event: RelationBrokenEvent) -> None:
             # https://github.com/canonical/mysql-operator/issues/32
             return

+        relation_id = event.relation.id
         try:
-            relation_id = event.relation.id
             self.charm._mysql.delete_users_for_relation(relation_id)
             logger.info(f"Removed user for relation {relation_id}")
         except (MySQLDeleteUsersForRelationError, KeyError):
```
```diff
@@ -309,3 +311,45 @@ def _on_database_provides_relation_departed(self, event: RelationDepartedEvent)
             logger.info(f"Removed router from metadata {user.router_id}")
         except MySQLRemoveRouterFromMetadataError:
             logger.error(f"Failed to remove router from metadata with ID {user.router_id}")
+
+    def remove_unit_from_endpoints(self, unit: Unit) -> None:
+        """Remove a unit from the endpoints for related applications.
+
+        Args:
+            unit (ops.Unit): The unit to be removed.
+        """
+        if not self.charm.unit.is_leader():
+            return
+
+        if not self.charm.cluster_initialized:
+            logger.debug("Waiting cluster to be initialized")
+            return
+
+        unit_address = self.charm.get_unit_ip(unit)
+
+        # filter out the unit address from the (ro)endpoints
+        for relation in self.active_relations:
+            # rw endpoints
+            endpoints = (
+                self.database.fetch_my_relation_field(relation.id, "endpoints", DB_RELATION_NAME)
+                or ""
+            )
+            if unit_address in endpoints:
+                self.database.set_endpoints(
+                    relation.id,
+                    ",".join([e for e in endpoints.split(",") if unit_address not in e]),
+                )
+                continue
```

Review comment (on the read-write endpoints update): can this set read-write endpoints to

Author reply: Yes, and it's intentional - a client app should be able to handle (and wait for the endpoint to be set).
```diff
+
+            # ro endpoints
+            ro_endpoints = (
+                self.database.fetch_my_relation_field(
+                    relation.id, "read-only-endpoints", DB_RELATION_NAME
+                )
+                or ""
+            )
+            if unit_address in ro_endpoints:
+                self.database.set_read_only_endpoints(
+                    relation.id,
+                    ",".join([e for e in ro_endpoints.split(",") if unit_address not in e]),
+                )
```
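A minimal sketch of the filtering behaviour discussed in the review thread above, with hypothetical addresses (`filter_out` mirrors the inline list comprehension): when the departing unit held the only read-write endpoint, the field is set to an empty string, which the client app is expected to tolerate until endpoints are republished.

```python
# Mirrors the list-comprehension filter above; addresses are hypothetical.
def filter_out(endpoints: str, unit_address: str) -> str:
    return ",".join(e for e in endpoints.split(",") if unit_address not in e)

# Departing unit holds one of two read-only endpoints: only it is dropped.
assert filter_out("10.0.0.2:3306,10.0.0.3:3306", "10.0.0.3") == "10.0.0.2:3306"

# Departing unit holds the only read-write endpoint: the field becomes "",
# which per the review reply client apps should handle by waiting for the
# endpoint to be set again.
assert filter_out("10.0.0.1:3306", "10.0.0.1") == ""
```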
```diff
@@ -17,6 +17,7 @@
     VersionError,
 )
 from charms.mysql.v0.mysql import (
+    MySQLGetClusterEndpointsError,
     MySQLGetMySQLVersionError,
     MySQLServerNotUpgradableError,
     MySQLSetClusterPrimaryError,
```
```diff
@@ -171,6 +172,11 @@ def _on_upgrade_charm_check_legacy(self, event) -> None:
     @override
     def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None:  # noqa: C901
         """Handle the upgrade granted event."""
+        if self.charm.unit.is_leader():
+            # preemptively change primary on leader unit
+            # we assume the leader is primary, since the switchover is done on pre-upgrade-check
+            self._primary_switchover()
+
```
Review comment on lines +177 to +179: question: if the leader is not the primary (e.g. a switchover happened after pre-upgrade-check), will things break? (A defensive sketch follows the hunk below.)
```diff
         try:
             self.charm.unit.status = MaintenanceStatus("stopping services..")
             self.charm._mysql.stop_mysqld()
```
```diff
@@ -260,6 +266,21 @@ def _recover_single_unit_cluster(self) -> None:
         logger.debug("Recovering single unit cluster")
         self.charm._mysql.reboot_from_complete_outage()

+    def _primary_switchover(self) -> None:
+        """Switchover primary to the first available RO endpoint."""
+        try:
+            _, ro_endpoints, _ = self.charm._mysql.get_cluster_endpoints(get_ips=False)
+            if not ro_endpoints:
+                # no ro endpoints, can't switchover
+                return
+            new_primary_address = ro_endpoints.split(",")[0]
+            self.charm._mysql.set_cluster_primary(new_primary_address)
+        except (MySQLSetClusterPrimaryError, MySQLGetClusterEndpointsError):
+            # If upgrading mysql version, older mysqlsh will fail to set primary
+            logger.warning(
+                "Failed to switchover primary. Endpoints will be updated after upgrade."
+            )
+
     def _on_upgrade_changed(self, _) -> None:
         """Handle the upgrade changed event.
```
Review comment: where is this method being used?

Author reply (b60ee97): nowhere, removed. It would be useful if the upgrade happened in distinct hook calls, which is not the case. (A hypothetical call site is sketched below.)
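For illustration only, a hypothetical call site for `remove_unit_from_endpoints`, along the lines the reply hints at, had each unit's upgrade run in a distinct hook call. The `database_relation` attribute name is an assumption, and this wiring was removed in b60ee97.

```python
# Hypothetical wiring (removed in b60ee97): drop the upgrading unit from
# client endpoints before stopping mysqld. `database_relation` is an assumed
# attribute holding the relation handler from the first file's diff.
def _on_upgrade_granted(self, event) -> None:
    self.charm.database_relation.remove_unit_from_endpoints(self.charm.unit)
    # ... stop mysqld, upgrade the workload, rejoin the cluster ...
```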