diff --git a/.github/install_latest_podman.sh b/.github/install_latest_podman.sh
old mode 100644
new mode 100755
index ceedbe0..39fdfd1
--- a/.github/install_latest_podman.sh
+++ b/.github/install_latest_podman.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 sudo apt-get update
 sudo apt-get -y upgrade
 sudo apt-get -y install podman
diff --git a/.github/workflows/e2e-testing.yaml b/.github/workflows/e2e-testing.yaml
index 79c4b82..d97ac6f 100644
--- a/.github/workflows/e2e-testing.yaml
+++ b/.github/workflows/e2e-testing.yaml
@@ -8,11 +8,10 @@ jobs:
     strategy:
       matrix:
         kind-node-images:
-          - kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245
-          - kindest/node:v1.28.7@sha256:9bc6c451a289cf96ad0bbaf33d416901de6fd632415b076ab05f5fa7e4f65c58
-          - kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
-          - kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
-          - kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8
+          - kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865
+          - kindest/node:v1.30.4@sha256:976ea815844d5fa93be213437e3ff5754cd599b040946b5cca43ca45c2047114
+          - kindest/node:v1.29.8@sha256:d46b7aa29567e93b27f7531d258c372e829d7224b25e3fc6ffdefed12476d3aa
+          - kindest/node:v1.28.13@sha256:45d319897776e11167e4698f6b14938eb4d52eb381d9e3d7a9086c16c69a8110
 
     steps:
       - name: Checkout
diff --git a/.gitignore b/.gitignore
index 68d0222..5a9be51 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,6 @@
 src/__pycache__/
+src/tests/__pycache__/
 yaml/Object_example/debug-*
+.vscode
+.coverage
+lcov.info
diff --git a/Makefile b/Makefile
index 5c02c75..50f4b66 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 IMG_NAMESPACE = flag5
 IMG_NAME = clustersecret
 IMG_FQNAME = $(IMG_NAMESPACE)/$(IMG_NAME)
-IMG_VERSION = 0.0.10
+IMG_VERSION = 0.0.11
 
 .PHONY: container push clean 
 all: container
@@ -69,5 +69,9 @@ chart-update:
 	helm package charts/cluster-secret/ -d docs/
 	helm repo index ./docs
 
-dev-run:
+dev-prepare:
+	kubectl apply -f ./yaml/00_rbac.yaml
+	kubectl apply -f ./yaml/01_crd.yaml
+
+dev-run: dev-prepare
 	kopf run ./src/handlers.py --verbose -A
diff --git a/README.md b/README.md
index db88160..a9cd8b1 100644
--- a/README.md
+++ b/README.md
@@ -5,8 +5,6 @@
 ## Kubernetes ClusterSecret 
 [*clustersecret.com*](https://clustersecret.com/)
 
-# note clustersecret.io domain is deprecated. use clustersecret.com from now on.
-
 Cluster wide secrets
 
 ClusterSecret operator makes sure all the matching namespaces have the secret available and up to date.
diff --git a/charts/cluster-secret/Chart.yaml b/charts/cluster-secret/Chart.yaml
index 61b992f..12f9f80 100755
--- a/charts/cluster-secret/Chart.yaml
+++ b/charts/cluster-secret/Chart.yaml
@@ -1,13 +1,13 @@
 apiVersion: v2
 name: cluster-secret
 description: ClusterSecret Operator
-kubeVersion: '>= 1.16.0-0'
+kubeVersion: '>= 1.25.0-0'
 type: application
-version: 0.4.2
+version: 0.4.3
 icon: https://clustersecret.com/assets/csninjasmall.png
 sources:
 - https://github.com/zakkg3/ClusterSecret
-appVersion: "0.0.10"
+appVersion: "0.0.11"
 maintainers:
 - email: zakkg3@gmail.com
   name: zakkg3
diff --git a/charts/cluster-secret/README.md b/charts/cluster-secret/README.md
index 600d3a1..c00f493 100644
--- a/charts/cluster-secret/README.md
+++ b/charts/cluster-secret/README.md
@@ -57,7 +57,7 @@ Clustersecrets automates this. It keeps track of any modification in your secret
 
 ## Requirements
 
-Current is 0.0.10 tested on > 1.27.1
+Current version is 0.0.11, tested on Kubernetes > 1.27.1
 Version 0.0.9 is tested for Kubernetes >= 1.19 up to 1.27.1
 
 For older Kubernetes (<1.19) use the image tag "0.0.6" in yaml/02_deployment.yaml
diff --git a/charts/cluster-secret/templates/deployment.yaml b/charts/cluster-secret/templates/deployment.yaml
index 9e133da..4c7c0f9 100644
--- a/charts/cluster-secret/templates/deployment.yaml
+++ b/charts/cluster-secret/templates/deployment.yaml
@@ -22,6 +22,10 @@ spec:
       labels:
         app: clustersecret
       {{- include "cluster-secret.selectorLabels" . | nindent 8 }}
+      annotations:
+        {{- range $key, $value := .Values.podAnnotations }}
+        {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }}
+        {{- end }}
     spec:
       securityContext:
         runAsUser: 100 # 100 is set by the container and can NOT be changed here - this would result in a getpwuid() error
@@ -59,4 +63,4 @@ spec:
       {{- with .Values.tolerations }}
       tolerations:
         {{- toYaml . | nindent 8 }}
-      {{- end }}
\ No newline at end of file
+      {{- end }}
diff --git a/charts/cluster-secret/values.yaml b/charts/cluster-secret/values.yaml
index 09b0285..f8baa7f 100644
--- a/charts/cluster-secret/values.yaml
+++ b/charts/cluster-secret/values.yaml
@@ -1,7 +1,7 @@
 imagePullSecrets: []
 image:
   repository: quay.io/clustersecret/clustersecret
-  tag: 0.0.10
+  tag: 0.0.11
   # use tag-alt for ARM and other alternative builds - read the readme for more information
   # If Clustersecret is about to create a secret and then finds it already exists:
   # Default is to ignore it (so as not to lose any unintentional data).
@@ -15,3 +15,6 @@ nodeSelector: {}
 tolerations: []
 
 affinity: {}
+
+# Additional Pod annotations
+podAnnotations: {}
diff --git a/src/handlers.py b/src/handlers.py
index 769c783..f0c32dd 100644
--- a/src/handlers.py
+++ b/src/handlers.py
@@ -1,4 +1,5 @@
 import logging
+import sys
 from typing import Any, Dict, List, Optional
 
 import kopf
@@ -14,15 +15,14 @@
 
 from os_utils import in_cluster
 
-csecs: Dict[str, Any] = {}
-
-# Loading kubeconfig
-if in_cluster():
+if "unittest" not in sys.modules:
     # Loading kubeconfig
-    config.load_incluster_config()
-else:
-    # Loading using the local kubevonfig.
-    config.load_kube_config()
+    if in_cluster():
+        # Loading the in-cluster config.
+        config.load_incluster_config()
+    else:
+        # Loading the local kubeconfig.
+        config.load_kube_config()
 
 v1 = client.CoreV1Api()
 custom_objects_api = client.CustomObjectsApi()
@@ -92,7 +92,7 @@ def on_field_match_namespace(
         uid=uid,
         name=name,
         namespace=namespace,
-        data=body.get('data'),
+        body=body,
         synced_namespace=updated_matched,
     ))
 
@@ -113,6 +113,8 @@ def on_field_data(
     body: Dict[str, Any],
     meta: kopf.Meta,
     name: str,
+    namespace: Optional[str],
+    uid: str,
     logger: logging.Logger,
     **_,
 ):
@@ -126,9 +128,14 @@ def on_field_data(
 
     secret_type = body.get('type', 'Opaque')
 
+    cached_cluster_secret = csecs_cache.get_cluster_secret(uid)
+    if cached_cluster_secret is None:
+        logger.error('Received an event for an unknown ClusterSecret.')
+
+    updated_syncedns = syncedns.copy()
     for ns in syncedns:
         logger.info(f'Re Syncing secret {name} in ns {ns}')
-        body = client.V1Secret(
+        ns_sec_body = client.V1Secret(
             api_version='v1',
             data={str(key): str(value) for key, value in new.items()},
             kind='Secret',
@@ -140,14 +147,42 @@ def on_field_data(
             ),
             type=secret_type,
         )
-        logger.debug(f'body: {body}')
+        logger.debug(f'body: {ns_sec_body}')
         # Ensuring the secret still exist.
         if secret_exists(logger=logger, name=name, namespace=ns, v1=v1):
-            response = v1.replace_namespaced_secret(name=name, namespace=ns, body=body)
+            response = v1.replace_namespaced_secret(name=name, namespace=ns, body=ns_sec_body)
         else:
-            response = v1.create_namespaced_secret(namespace=ns, body=body)
+            try:
+                v1.read_namespace(name=ns)
+            except client.exceptions.ApiException as e:
+                if e.status != 404:
+                    raise
+                response = f'Namespace {ns} not found'
+                updated_syncedns.remove(ns)
+                logger.info(f'Namespace {ns} not found while Syncing secret {name}')
+            else:
+                response = v1.create_namespaced_secret(namespace=ns, body=ns_sec_body)
         logger.debug(response)
 
+    if updated_syncedns != syncedns:
+        # Patch synced_ns field
+        logger.debug(f'Patching clustersecret {name} in namespace {namespace}')
+        body = patch_clustersecret_status(
+            logger=logger,
+            name=name,
+            new_status={'create_fn': {'syncedns': updated_syncedns}},
+            custom_objects_api=custom_objects_api,
+        )
+
+    # Updating the cache
+    csecs_cache.set_cluster_secret(BaseClusterSecret(
+        uid=uid,
+        name=name,
+        namespace=namespace or "",
+        body=body,
+        synced_namespace=updated_syncedns,
+    ))
+
 
 @kopf.on.resume('clustersecret.io', 'v1', 'clustersecrets')
 @kopf.on.create('clustersecret.io', 'v1', 'clustersecrets')
@@ -164,8 +199,8 @@ async def create_fn(
 
     # sync in all matched NS
     logger.info(f'Syncing on Namespaces: {matchedns}')
-    for namespace in matchedns:
-        sync_secret(logger, namespace, body, v1)
+    for ns in matchedns:
+        sync_secret(logger, ns, body, v1)
 
     # store status in memory
     cached_cluster_secret = csecs_cache.get_cluster_secret(uid)
@@ -176,8 +211,8 @@ async def create_fn(
     csecs_cache.set_cluster_secret(BaseClusterSecret(
         uid=uid,
         name=name,
-        namespace=namespace,
-        data=body.get('data'),
+        namespace=namespace or "",
+        body=body,
         synced_namespace=matchedns,
     ))
 
@@ -193,10 +228,10 @@ async def namespace_watcher(logger: logging.Logger, meta: kopf.Meta, **_):
     logger.debug(f'New namespace created: {new_ns} re-syncing')
     ns_new_list = []
     for cluster_secret in csecs_cache.all_cluster_secret():
-        obj_body = cluster_secret['body']
-        name = obj_body['metadata']['name']
+        obj_body = cluster_secret.body
+        name = cluster_secret.name
 
-        matcheddns = cluster_secret['syncedns']
+        matcheddns = cluster_secret.synced_namespace
 
         logger.debug(f'Old matched namespace: {matcheddns} - name: {name}')
         ns_new_list = get_ns_list(logger, obj_body, v1)
@@ -211,11 +246,16 @@ async def namespace_watcher(logger: logging.Logger, meta: kopf.Meta, **_):
             )
 
             # if there is a new matching ns, refresh cache
-            cluster_secret.namespace = ns_new_list
+            cluster_secret.synced_namespace = ns_new_list
             csecs_cache.set_cluster_secret(cluster_secret)
 
-    # update ns_new_list on the object so then we also delete from there
-    return {'syncedns': ns_new_list}
+        # update ns_new_list on the object so then we also delete from there
+        patch_clustersecret_status(
+            logger=logger,
+            name=cluster_secret.name,
+            new_status={'create_fn': {'syncedns': ns_new_list}},
+            custom_objects_api=custom_objects_api,
+        )
 
 
 @kopf.on.startup()
@@ -243,8 +283,8 @@ async def startup_fn(logger: logging.Logger, **_):
             BaseClusterSecret(
                 uid=metadata.get('uid'),
                 name=metadata.get('name'),
-                namespace=metadata.get('namespace'),
-                data=item.get('data'),
+                namespace=metadata.get('namespace', ''),
+                body=item,
                 synced_namespace=item.get('status', {}).get('create_fn', {}).get('syncedns', []),
             )
         )
diff --git a/src/kubernetes_utils.py b/src/kubernetes_utils.py
index 444b5cd..f862bb0 100644
--- a/src/kubernetes_utils.py
+++ b/src/kubernetes_utils.py
@@ -36,7 +36,7 @@ def patch_clustersecret_status(
     logger.debug(f'Updated clustersecret manifest: {clustersecret}')
 
     # Perform a patch operation to update the custom resource
-    custom_objects_api.patch_cluster_custom_object(
+    return custom_objects_api.patch_cluster_custom_object(
         group=group,
         version=version,
         plural=plural,
diff --git a/src/models.py b/src/models.py
index e2cbc46..806daf0 100644
--- a/src/models.py
+++ b/src/models.py
@@ -7,5 +7,5 @@ class BaseClusterSecret(BaseModel):
     uid: str
     name: str
     namespace: str
-    data: Dict[str, Any]
+    body: Dict[str, Any]
     synced_namespace: List[str]
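
With BaseClusterSecret now carrying the full manifest in body, cache consumers read
the secret payload via body["data"] rather than a dedicated data field. A minimal
usage sketch mirroring the new tests (illustrative only, not part of the patch):

    from models import BaseClusterSecret

    csec = BaseClusterSecret(
        uid="mysecretuid",
        name="mysecret",
        namespace="",  # may be empty: ClusterSecrets are cluster-scoped
        body={"metadata": {"name": "mysecret", "uid": "mysecretuid"},
              "data": {"key": "value"}},
        synced_namespace=["default"],
    )

    assert csec.body.get("data") == {"key": "value"}
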
diff --git a/src/requirements.txt b/src/requirements.txt
index 287234d..7ef4ecc 100644
--- a/src/requirements.txt
+++ b/src/requirements.txt
@@ -1,4 +1,4 @@
-kopf===1.35.3
+kopf===1.37.2
 kubernetes===19.15.0
 setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
-pydantic==2.3.0
\ No newline at end of file
+pydantic==2.4.0
diff --git a/src/tests/test_handlers.py b/src/tests/test_handlers.py
new file mode 100644
index 0000000..6e7cb34
--- /dev/null
+++ b/src/tests/test_handlers.py
@@ -0,0 +1,375 @@
+import asyncio
+import kopf
+import logging
+import unittest
+
+from kubernetes.client import V1ObjectMeta, V1Secret, ApiException
+from unittest.mock import ANY, Mock, patch
+
+from handlers import create_fn, custom_objects_api, csecs_cache, namespace_watcher, on_field_data, startup_fn
+from kubernetes_utils import create_secret_metadata
+from models import BaseClusterSecret
+
+
+class TestClusterSecretHandler(unittest.TestCase):
+
+    def setUp(self):
+        self.logger = logging.getLogger(__name__)
+        for cluster_secret in csecs_cache.all_cluster_secret():
+            csecs_cache.remove_cluster_secret(cluster_secret.uid)
+
+    def test_on_field_data_cache(self):
+        """New data should be written into the cache.
+        """
+
+        # Old data in the cache.
+        csec = BaseClusterSecret(
+            uid="mysecretuid",
+            name="mysecret",
+            namespace="",
+            body={"metadata": {"name": "mysecret", "uid": "mysecretuid"}, "data": {"key": "oldvalue"}},
+            synced_namespace=[],
+        )
+
+        csecs_cache.set_cluster_secret(csec)
+
+        # New data coming into the callback.
+        new_body = {"metadata": {"name": "mysecret", "uid": "mysecretuid"}, "data": {"key": "newvalue"}}
+
+        on_field_data(
+            old={"key": "oldvalue"},
+            new={"key": "newvalue"},
+            body=new_body,
+            meta=kopf.Meta({"metadata": {"name": "mysecret"}}),
+            name="mysecret",
+            namespace=None,
+            uid="mysecretuid",
+            logger=self.logger,
+        )
+
+        # New data should be in the cache.
+        self.assertEqual(
+            csecs_cache.get_cluster_secret("mysecretuid").body.get("data"),
+            {"key": "newvalue"},
+        )
+
+    def test_on_field_data_sync(self):
+        """Must sync secret data changes to the namespaces.
+        """
+
+        mock_v1 = Mock()
+
+        # Old data in the namespaced secret of the myns namespace.
+        mock_v1.read_namespaced_secret.return_value = V1Secret(
+            api_version='v1',
+            data={"key": "oldvalue"},
+            kind='Secret',
+            metadata=create_secret_metadata(
+                name="mysecret",
+                namespace="myns",
+            ),
+            type="Opaque",
+        )
+
+        # Old data in the cache.
+        csec = BaseClusterSecret(
+            uid="mysecretuid",
+            name="mysecret",
+            namespace="",
+            body={
+                "metadata": {"name": "mysecret", "uid": "mysecretuid"},
+                "data": {"key": "oldvalue"},
+                "status": {"create_fn": {"syncedns": ["myns"]}},
+            },
+            synced_namespace=["myns"],
+        )
+
+        csecs_cache.set_cluster_secret(csec)
+
+        # New data coming into the callback.
+        new_body = {
+            "metadata": {"name": "mysecret", "uid": "mysecretuid"},
+            "data": {"key": "newvalue"},
+            "status": {"create_fn": {"syncedns": ["myns"]}},
+        }
+
+        with patch("handlers.v1", mock_v1):
+            on_field_data(
+                old={"key": "oldvalue"},
+                new={"key": "newvalue"},
+                body=new_body,
+                meta=kopf.Meta({"metadata": {"name": "mysecret"}}),
+                name="mysecret",
+                namespace=None,
+                uid="mysecretuid",
+                logger=self.logger,
+            )
+
+        # Namespaced secret should be updated.
+        mock_v1.replace_namespaced_secret.assert_called_once_with(
+            name=csec.name,
+            namespace="myns",
+            body=ANY,
+        )
+
+        # Namespaced secret should be updated with the new data.
+        self.assertEqual(
+            mock_v1.replace_namespaced_secret.call_args.kwargs.get("body").data,
+            {"key": "newvalue"},
+        )
+
+    def test_on_field_data_ns_deleted(self):
+        """Don't fail the sync if one of the namespaces was deleted.
+        """
+
+        mock_v1 = Mock()
+
+        def read_namespaced_secret(name, namespace, **kwargs):
+            if namespace == "myns2":
+                # Old data in the namespaced secret of the myns namespace.
+                return V1Secret(
+                    api_version='v1',
+                    data={"key": "oldvalue"},
+                    kind='Secret',
+                    metadata=create_secret_metadata(
+                        name="mysecret",
+                        namespace="myns2",
+                    ),
+                    type="Opaque",
+                )
+            else:
+                # Deleted namespace.
+                raise ApiException(status=404, reason="Not Found")
+
+        mock_v1.read_namespaced_secret = read_namespaced_secret
+
+        create_namespaced_secret_called_count_for_ns2 = 0
+
+        def create_namespaced_secret(namespace, body, **kwargs):
+            if namespace == "myns2":
+                nonlocal create_namespaced_secret_called_count_for_ns2
+                create_namespaced_secret_called_count_for_ns2 += 1
+            else:
+                # Deleted namespace.
+                raise ApiException(status=404, reason="Not Found")
+
+        mock_v1.create_namespaced_secret = create_namespaced_secret
+
+        replace_namespaced_secret_called_count_for_ns2 = 0
+
+        def replace_namespaced_secret(name, namespace, body, **kwargs):
+            if namespace == "myns2":
+                nonlocal replace_namespaced_secret_called_count_for_ns2
+                replace_namespaced_secret_called_count_for_ns2 += 1
+                self.assertEqual(name, csec.name)
+
+                # Namespaced secret should be updated with the new data.
+                self.assertEqual(
+                    body.data,
+                    {"key": "newvalue"},
+                )
+
+                return V1Secret(
+                    api_version='v1',
+                    data=body.data,
+                    kind='Secret',
+                    metadata=create_secret_metadata(
+                        name="mysecret",
+                        namespace="myns2",
+                    ),
+                    type="Opaque",
+                )
+            else:
+                # Deleted namespace.
+                raise ApiException(status=404, reason="Not Found")
+
+        mock_v1.replace_namespaced_secret = replace_namespaced_secret
+
+        def read_namespace(name, **kwargs):
+            if name != "myns2":
+                # Deleted namespace.
+                raise ApiException(status=404, reason="Not Found")
+
+        mock_v1.read_namespace = read_namespace
+
+        patch_clustersecret_status = Mock()
+        patch_clustersecret_status.return_value = {
+            "metadata": {"name": "mysecret", "uid": "mysecretuid"},
+            "data": {"key": "newvalue"},
+            "status": {"create_fn": {"syncedns": ["myns2"]}},
+        }
+
+        # Old data in the cache.
+        csec = BaseClusterSecret(
+            uid="mysecretuid",
+            name="mysecret",
+            namespace="",
+            body={
+                "metadata": {"name": "mysecret", "uid": "mysecretuid"},
+                "data": {"key": "oldvalue"},
+                "status": {"create_fn": {"syncedns": ["myns1", "myns2"]}},
+            },
+            synced_namespace=["myns1", "myns2"],
+        )
+
+        csecs_cache.set_cluster_secret(csec)
+
+        # New data coming into the callback.
+        new_body = {
+            "metadata": {"name": "mysecret", "uid": "mysecretuid"},
+            "data": {"key": "newvalue"},
+            "status": {"create_fn": {"syncedns": ["myns1", "myns2"]}},
+        }
+
+        with patch("handlers.v1", mock_v1), \
+             patch("handlers.patch_clustersecret_status", patch_clustersecret_status):
+            on_field_data(
+                old={"key": "oldvalue"},
+                new={"key": "newvalue"},
+                body=new_body,
+                meta=kopf.Meta({"metadata": {"name": "mysecret"}}),
+                name="mysecret",
+                namespace=None,
+                uid="mysecretuid",
+                logger=self.logger,
+            )
+
+        # Namespaced secret should be updated with the new data.
+        self.assertEqual(replace_namespaced_secret_called_count_for_ns2, 1)
+        self.assertEqual(create_namespaced_secret_called_count_for_ns2, 0)
+
+        # The namespace should be deleted from the syncedns status of the clustersecret.
+        patch_clustersecret_status.assert_called_once_with(
+            logger=self.logger,
+            name=csec.name,
+            new_status={'create_fn': {'syncedns': ["myns2"]}},
+            custom_objects_api=custom_objects_api,
+        )
+
+        # Namespace should be deleted from the cache.
+        self.assertEqual(
+            csecs_cache.get_cluster_secret("mysecretuid").body.get("status"),
+            {"create_fn": {"syncedns": ["myns2"]}},
+        )
+        self.assertEqual(
+            csecs_cache.get_cluster_secret("mysecretuid").synced_namespace,
+            ["myns2"],
+        )
+
+    def test_create_fn(self):
+        """Namespace name must be correct in the cache.
+        """
+
+        mock_v1 = Mock()
+
+        body = {
+            "metadata": {
+                "name": "mysecret",
+                "namespace": "myclustersecretnamespace",
+                "uid": "mysecretuid"
+            },
+            "data": {"key": "value"}
+        }
+
+        # Define the predefined list of namespaces you want to use in the test
+        predefined_nss = [Mock(metadata=V1ObjectMeta(name=ns)) for ns in ["default", "myns"]]
+
+        # Configure the mock's behavior to return the predefined namespaces when list_namespace is called
+        mock_v1.list_namespace.return_value.items = predefined_nss
+
+        with patch("handlers.v1", mock_v1), \
+             patch("handlers.sync_secret"):
+            asyncio.run(
+                create_fn(
+                    logger=self.logger,
+                    uid="mysecretuid",
+                    name="mysecret",
+                    namespace="myclustersecretnamespace",
+                    body=body,
+                )
+            )
+
+        # ClusterSecret with a correct namespace should be in the cache.
+        self.assertEqual(
+            csecs_cache.get_cluster_secret("mysecretuid").namespace,
+            "myclustersecretnamespace",
+        )
+
+    def test_ns_create(self):
+        """A new namespace must get the cluster secrets.
+        """
+
+        mock_v1 = Mock()
+
+        # Define the predefined list of namespaces you want to use in the test
+        predefined_nss = [Mock(metadata=V1ObjectMeta(name=ns)) for ns in ["default", "myns"]]
+
+        # Configure the mock's behavior to return the predefined namespaces when list_namespace is called
+        mock_v1.list_namespace.return_value.items = predefined_nss
+
+        patch_clustersecret_status = Mock()
+
+        csec = BaseClusterSecret(
+            uid="mysecretuid",
+            name="mysecret",
+            namespace="",
+            body={"metadata": {"name": "mysecret"}, "data": "mydata"},
+            synced_namespace=["default"],
+        )
+
+        csecs_cache.set_cluster_secret(csec)
+
+        with patch("handlers.v1", mock_v1), \
+             patch("handlers.patch_clustersecret_status", patch_clustersecret_status):
+            asyncio.run(
+                namespace_watcher(
+                    logger=self.logger,
+                    meta=kopf.Meta({"metadata": {"name": "myns"}}),
+                )
+            )
+
+        # The new namespace should have the secret copied into it.
+        mock_v1.replace_namespaced_secret.assert_called_once_with(
+            name=csec.name,
+            namespace="myns",
+            body=ANY,
+        )
+
+        # The namespace should be added to the syncedns status of the clustersecret.
+        patch_clustersecret_status.assert_called_once_with(
+            logger=self.logger,
+            name=csec.name,
+            new_status={'create_fn': {'syncedns': ["default", "myns"]}},
+            custom_objects_api=custom_objects_api,
+        )
+
+        # The new namespace should be in the cache.
+        self.assertCountEqual(
+            csecs_cache.get_cluster_secret("mysecretuid").synced_namespace,
+            ["default", "myns"],
+        )
+
+    def test_startup_fn(self):
+        """Must not fail on empty namespace in ClusterSecret metadata (it's cluster-wide after all).
+        """
+
+        get_custom_objects_by_kind = Mock()
+
+        csec = BaseClusterSecret(
+            uid="mysecretuid",
+            name="mysecret",
+            namespace="",
+            body={"metadata": {"name": "mysecret", "uid": "mysecretuid"}, "data": "mydata"},
+            synced_namespace=[],
+        )
+
+        get_custom_objects_by_kind.return_value = [csec.body]
+
+        with patch("handlers.get_custom_objects_by_kind", get_custom_objects_by_kind):
+            asyncio.run(startup_fn(logger=self.logger))
+
+        # The secret should be in the cache.
+        self.assertEqual(
+            csecs_cache.get_cluster_secret("mysecretuid"),
+            csec,
+        )