diff --git a/actions.yaml b/actions.yaml
index 6e3ced3d8b..9214dcd5bd 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -62,3 +62,5 @@ set-tls-private-key:
     private-key:
       type: string
       description: The content of private key for communications with clients. Content will be auto-generated if this option is not specified.
+reenable-oversee-users:
+  description: Re-enable purging of managed credentials after a standby cluster is promoted.
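
The action is meant to be run on the leader of the promoted cluster, e.g. `juju run postgresql/leader reenable-oversee-users` with Juju 3.x syntax. A minimal python-libjuju sketch of the same call, assuming `leader_unit` is the leader's connected Unit object:

    # Minimal sketch (python-libjuju); `leader_unit` is an assumed
    # juju.unit.Unit instance for the promoted cluster's leader.
    action = await leader_unit.run_action("reenable-oversee-users")
    await action.wait()
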
diff --git a/src/relations/async_replication.py b/src/relations/async_replication.py
index 49eb672a9e..9aa04982cc 100644
--- a/src/relations/async_replication.py
+++ b/src/relations/async_replication.py
@@ -108,6 +108,9 @@ def __init__(self, charm):
         self.framework.observe(
             self.charm.on.promote_to_primary_action, self._on_promote_to_primary
         )
+        self.framework.observe(
+            self.charm.on.reenable_oversee_users_action, self._on_reenable_oversee_users
+        )
 
         self.framework.observe(self.charm.on.secret_changed, self._on_secret_changed)
 
@@ -600,6 +603,19 @@ def _on_promote_to_primary(self, event: ActionEvent) -> None:
         # Set the status.
         self.charm.unit.status = MaintenanceStatus("Creating replication...")
 
+    def _on_reenable_oversee_users(self, event: ActionEvent) -> None:
+        """Re-enable oversee users after cluster was promoted."""
+        if not self.charm.unit.is_leader():
+            event.fail("Unit is not leader")
+            return
+
+        if "suppress-oversee-users" not in self.charm.app_peer_data:
+            event.fail("Oversee users is not suppressed")
+            return
+
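+        # Clearing the flag lets oversee_users resume purging managed credentials.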
+        del self.charm.app_peer_data["suppress-oversee-users"]
+
     def _on_secret_changed(self, event: SecretChangedEvent) -> None:
         """Update the internal secret when the relation secret changes."""
         relation = self._relation
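
For context, the flag deleted above is presumably checked by the charm's user-overseeing routine before it purges managed credentials; a hypothetical sketch of such a guard (the method below is illustrative, not part of this diff):

    def oversee_users(self) -> None:
        # Hypothetical guard: while the suppress flag set during promotion
        # is still in app peer data, skip purging managed credentials.
        if "suppress-oversee-users" in self.app_peer_data:
            return
        ...
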
diff --git a/tests/integration/ha_tests/test_async_replication.py b/tests/integration/ha_tests/test_async_replication.py
index 3d95292229..0eef1518a7 100644
--- a/tests/integration/ha_tests/test_async_replication.py
+++ b/tests/integration/ha_tests/test_async_replication.py
@@ -4,7 +4,7 @@
 import contextlib
 import logging
 import subprocess
-from asyncio import gather
+from asyncio import gather, sleep
 from typing import Optional
 
 import psycopg2
@@ -318,6 +318,22 @@ async def test_data_integrator_creds_keep_on_working(
     finally:
         connection.close()
 
+    logger.info("Re-enable oversee users")
+    leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME, model=second_model)
+    action = await leader_unit.run_action(action_name="reenable-oversee-users")
+    await action.wait()
+
+    async with fast_forward(second_model, FAST_INTERVAL):
+        await sleep(20)
+    await second_model.wait_for_idle(
+        apps=[DATABASE_APP_NAME],
+        status="active",
+        timeout=TIMEOUT,
+    )
+    with pytest.raises(psycopg2.OperationalError):
+        psycopg2.connect(connstr)
+    logger.info("Data integrator creds purged")
+
 
 @pytest.mark.group(1)
 @markers.juju3
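
Note that the integration test fires the action but never asserts that it completed. python-libjuju exposes the terminal state on the Action entity after wait(), so a follow-up check along these lines would surface a failed action immediately:

    # Suggested hardening (python-libjuju): assert the action completed.
    action = await leader_unit.run_action(action_name="reenable-oversee-users")
    await action.wait()
    assert action.status == "completed", f"action ended in state {action.status}"
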
diff --git a/tests/unit/test_async_replication.py b/tests/unit/test_async_replication.py
new file mode 100644
index 0000000000..861323dee2
--- /dev/null
+++ b/tests/unit/test_async_replication.py
@@ -0,0 +1,51 @@
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+from unittest.mock import Mock
+
+import pytest
+from ops.testing import Harness
+
+from charm import PostgresqlOperatorCharm
+
+
+@pytest.fixture(autouse=True)
+def harness():
+    """Set up the test."""
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.begin()
+    upgrade_relation_id = harness.add_relation("upgrade", "postgresql")
+    peer_relation_id = harness.add_relation("database-peers", "postgresql")
+    for rel_id in (upgrade_relation_id, peer_relation_id):
+        harness.add_relation_unit(rel_id, "postgresql/1")
+    with harness.hooks_disabled():
+        harness.update_relation_data(upgrade_relation_id, "postgresql/1", {"state": "idle"})
+    yield harness
+    harness.cleanup()
+
+
+def test_on_reenable_oversee_users(harness):
+    # Fail if unit is not leader
+    event = Mock()
+
+    harness.charm.async_replication._on_reenable_oversee_users(event)
+
+    event.fail.assert_called_once_with("Unit is not leader")
+    event.fail.reset_mock()
+
+    # Fail if peer data is not set
+    with harness.hooks_disabled():
+        harness.set_leader()
+
+    harness.charm.async_replication._on_reenable_oversee_users(event)
+
+    event.fail.assert_called_once_with("Oversee users is not suppressed")
+    event.fail.reset_mock()
+
+    # Happy path: a leader with the flag set deletes it without failing
+    with harness.hooks_disabled():
+        harness.charm._peers.data[harness.charm.app].update({"suppress-oversee-users": "true"})
+
+        harness.charm.async_replication._on_reenable_oversee_users(event)
+
+        event.fail.assert_not_called()
+        assert harness.charm._peers.data[harness.charm.app] == {}
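
A possible follow-up for the unit tests: newer ops releases expose Harness.run_action, which drives the same handler through the public action interface instead of calling the private method; a sketch, assuming the Harness picks up actions.yaml from the charm directory:

    import pytest
    from ops.testing import ActionFailed


    def test_reenable_oversee_users_via_harness(harness):
        # Failure paths surface as ActionFailed through the public interface.
        with pytest.raises(ActionFailed):
            harness.run_action("reenable-oversee-users")

        with harness.hooks_disabled():
            harness.set_leader()
            harness.charm._peers.data[harness.charm.app].update({"suppress-oversee-users": "true"})

        # Happy path: the action removes the suppress flag.
        harness.run_action("reenable-oversee-users")
        assert "suppress-oversee-users" not in harness.charm._peers.data[harness.charm.app]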