diff --git a/pkg/controllers/apply_controller.go b/pkg/controllers/apply_controller.go
index 7650ec6..c75cd9a 100644
--- a/pkg/controllers/apply_controller.go
+++ b/pkg/controllers/apply_controller.go
@@ -48,6 +48,8 @@ import (
 
 const (
     workFieldManagerName = "work-api-agent"
+
+    AnnotationLastAppliedManifest = "fleet.azure.com/last-applied-manifest"
 )
 
 // ApplyWorkReconciler reconciles a Work object
@@ -267,10 +269,15 @@ func (r *ApplyWorkReconciler) decodeManifest(manifest workv1alpha1.Manifest) (sc
 func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema.GroupVersionResource, manifestObj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
     manifestRef := klog.ObjectRef{
         Name:      manifestObj.GetName(),
-        Namespace: manifestObj.GetName(),
+        Namespace: manifestObj.GetNamespace(),
+    }
+    if err := setManifestHashAnnotation(manifestObj); err != nil {
+        return nil, false, err
     }
+
     curObj, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Get(ctx, manifestObj.GetName(), metav1.GetOptions{})
-    if apierrors.IsNotFound(err) {
+    switch {
+    case apierrors.IsNotFound(err):
         actual, createErr := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create(
             ctx, manifestObj, metav1.CreateOptions{FieldManager: workFieldManagerName})
         if createErr == nil {
@@ -278,8 +285,7 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema.
             return actual, true, nil
         }
         return nil, false, createErr
-    }
-    if err != nil {
+    case err != nil:
         return nil, false, err
     }
 
@@ -290,10 +296,6 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema.
         return nil, false, err
     }
 
-    err = setManifestHashAnnotation(manifestObj)
-    if err != nil {
-        return nil, false, err
-    }
     // We only try to update the object if its spec hash value has changed.
     if manifestObj.GetAnnotations()[manifestHashAnnotation] != curObj.GetAnnotations()[manifestHashAnnotation] {
         return r.patchCurrentResource(ctx, gvr, manifestObj, curObj)
@@ -306,13 +308,11 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema.
 func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr schema.GroupVersionResource, manifestObj, curObj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
     manifestRef := klog.ObjectRef{
         Name:      manifestObj.GetName(),
-        Namespace: manifestObj.GetName(),
+        Namespace: manifestObj.GetNamespace(),
     }
     klog.V(5).InfoS("manifest is modified", "gvr", gvr, "manifest", manifestRef,
         "new hash", manifestObj.GetAnnotations()[manifestHashAnnotation],
         "existing hash", curObj.GetAnnotations()[manifestHashAnnotation])
-    // merge owner refes since the patch does just replace
-    manifestObj.SetOwnerReferences(utils.MergeOwnerReference(curObj.GetOwnerReferences(), manifestObj.GetOwnerReferences()))
 
     newData, err := manifestObj.MarshalJSON()
     if err != nil {
@@ -365,7 +365,7 @@ func (r *ApplyWorkReconciler) SetupWithManager(mgr ctrl.Manager) error {
         WithOptions(controller.Options{
             MaxConcurrentReconciles: r.concurrency,
         }).
-        For(&workv1alpha1.Work{}, builder.WithPredicates(predicate.ResourceVersionChangedPredicate{})).
+        For(&workv1alpha1.Work{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
         Complete(r)
 }
diff --git a/pkg/controllers/apply_controller_integration_test.go b/pkg/controllers/apply_controller_integration_test.go
new file mode 100644
index 0000000..3c760ad
--- /dev/null
+++ b/pkg/controllers/apply_controller_integration_test.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+    "context"
+    "encoding/json"
+    "time"
+
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
+
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    utilrand "k8s.io/apimachinery/pkg/util/rand"
+
+    workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
+)
+
+const timeout = time.Second * 30
+const interval = time.Second * 1
+
+var _ = Describe("Work Controller", func() {
+    var workNamespace string
+    var ns corev1.Namespace
+
+    BeforeEach(func() {
+        workNamespace = "work-" + utilrand.String(5)
+        // Create namespace
+        ns = corev1.Namespace{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: workNamespace,
+            },
+        }
+        err := k8sClient.Create(context.Background(), &ns)
+        Expect(err).ToNot(HaveOccurred())
+    })
+
+    AfterEach(func() {
+        // Add any teardown steps that need to be executed after each test
+        err := k8sClient.Delete(context.Background(), &ns)
+        Expect(err).ToNot(HaveOccurred())
+    })
+
+    Context("Deploy manifests by work", func() {
+        It("Should have a configmap deployed correctly", func() {
+            cmName := "testcm"
+            cmNamespace := "default"
+            cm := &corev1.ConfigMap{
+                TypeMeta: metav1.TypeMeta{
+                    APIVersion: "v1",
+                    Kind:       "ConfigMap",
+                },
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      cmName,
+                    Namespace: cmNamespace,
+                },
+                Data: map[string]string{
+                    "test": "test",
+                },
+            }
+
+            work := &workv1alpha1.Work{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "test-work",
+                    Namespace: workNamespace,
+                },
+                Spec: workv1alpha1.WorkSpec{
+                    Workload: workv1alpha1.WorkloadTemplate{
+                        Manifests: []workv1alpha1.Manifest{
+                            {
+                                RawExtension: runtime.RawExtension{Object: cm},
+                            },
+                        },
+                    },
+                },
+            }
+            By("create the work")
+            err := k8sClient.Create(context.Background(), work)
+            Expect(err).ToNot(HaveOccurred())
+
+            resultWork := waitForWorkToApply(work.GetName(), work.GetNamespace())
+            Expect(len(resultWork.Status.ManifestConditions)).Should(Equal(1))
+            Expect(meta.IsStatusConditionTrue(resultWork.Status.Conditions, ConditionTypeApplied)).Should(BeTrue())
+            Expect(meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, ConditionTypeApplied)).Should(BeTrue())
+
+            By("Check applied config map")
+            var configMap corev1.ConfigMap
+            Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, &configMap)).Should(Succeed())
+            Expect(len(configMap.Data)).Should(Equal(1))
+            Expect(configMap.Data["test"]).Should(Equal("test"))
+        })
+
+        It("Should pick up the manifest change correctly", func() {
+            cmName := "testserverapply"
+            cmNamespace := "default"
+            cm := &corev1.ConfigMap{
+                TypeMeta: metav1.TypeMeta{
+                    APIVersion: "v1",
+                    Kind:       "ConfigMap",
+                },
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      cmName,
+                    Namespace: cmNamespace,
+                    Labels: map[string]string{
+                        "labelKey1": "value1",
+                        "labelKey2": "value2",
+                    },
+                    Annotations: map[string]string{
+                        "annotationKey1": "annotation1",
+                        "annotationKey2": "annotation2",
+                    },
+                },
+                Data: map[string]string{
+                    "data1": "test1",
+                },
+            }
+
+            work := &workv1alpha1.Work{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "test-label-work",
+                    Namespace: workNamespace,
+                },
+                Spec: workv1alpha1.WorkSpec{
+                    Workload: workv1alpha1.WorkloadTemplate{
+                        Manifests: []workv1alpha1.Manifest{
+                            {
+                                RawExtension: runtime.RawExtension{Object: cm},
+                            },
+                        },
+                    },
+                },
+            }
+            By("create the work")
+            err := k8sClient.Create(context.Background(), work)
+            Expect(err).ToNot(HaveOccurred())
+
+            By("wait for the work to be applied")
+            waitForWorkToApply(work.GetName(), work.GetNamespace())
+
+            By("Check applied config map")
+            var configMap corev1.ConfigMap
+            Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, &configMap)).Should(Succeed())
+
+            By("Check the config map label")
+            Expect(len(configMap.Labels)).Should(Equal(2))
+            Expect(configMap.Labels["labelKey1"]).Should(Equal(cm.Labels["labelKey1"]))
+            Expect(configMap.Labels["labelKey2"]).Should(Equal(cm.Labels["labelKey2"]))
+
+            By("Check the config map annotation value")
+            Expect(len(configMap.Annotations)).Should(Equal(3)) // we added one more annotation (manifest hash)
+            Expect(configMap.Annotations["annotationKey1"]).Should(Equal(cm.Annotations["annotationKey1"]))
+            Expect(configMap.Annotations["annotationKey2"]).Should(Equal(cm.Annotations["annotationKey2"]))
+
+            By("Check the config map data")
+            Expect(len(configMap.Data)).Should(Equal(1))
+            Expect(configMap.Data["data1"]).Should(Equal(cm.Data["data1"]))
+
+            By("Modify the configMap")
+            // add new data
+            cm.Data["data2"] = "test2"
+            // modify one data
+            cm.Data["data1"] = "newValue"
+            // modify label key1
+            cm.Labels["labelKey1"] = "newValue"
+            // remove label key2
+            delete(cm.Labels, "labelKey2")
+            // add annotations key3
+            cm.Annotations["annotationKey3"] = "annotation3"
+            // remove annotations key1
+            delete(cm.Annotations, "annotationKey1")
+
+            By("update the work")
+            resultWork := waitForWorkToApply(work.GetName(), work.GetNamespace())
+            rawCM, err := json.Marshal(cm)
+            Expect(err).Should(Succeed())
+            resultWork.Spec.Workload.Manifests[0].Raw = rawCM
+            Expect(k8sClient.Update(context.Background(), resultWork)).Should(Succeed())
+
+            By("wait for the change of the work to be applied")
+            resultWork = waitForWorkToApply(work.GetName(), work.GetNamespace())
+
+            By("Get the last applied config map")
+            Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, &configMap)).Should(Succeed())
+            /*
+                By("Check the config map data")
+                Expect(len(configMap.Data)).Should(Equal(2))
+                Expect(configMap.Data["data1"]).Should(Equal(cm.Data["data1"]))
+                Expect(configMap.Data["data2"]).Should(Equal(cm.Data["data2"]))
+
+                By("Check the config map label")
+                Expect(len(configMap.Labels)).Should(Equal(1))
+                Expect(configMap.Labels["labelKey1"]).Should(Equal(cm.Labels["labelKey1"]))
+
+                By("Check the config map annotation value")
+                Expect(len(configMap.Annotations)).Should(Equal(3)) // we added one more annotation (manifest hash)
+                _, found := configMap.Annotations["annotationKey1"]
+                Expect(found).Should(BeFalse())
+                Expect(configMap.Annotations["annotationKey2"]).Should(Equal(cm.Annotations["annotationKey2"]))
+                Expect(configMap.Annotations["annotationKey3"]).Should(Equal(cm.Annotations["annotationKey3"]))
+            */
+        })
+    })
+})
+
+func waitForWorkToApply(workName, workNS string) *workv1alpha1.Work {
+    By("Wait for the work to be applied")
+    var resultWork workv1alpha1.Work
+    Eventually(func() bool {
+        err := k8sClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNS}, &resultWork)
+        if err != nil {
+            return false
+        }
+        if len(resultWork.Status.ManifestConditions) != 1 {
+            return false
+        }
+        if !meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, ConditionTypeApplied) {
+            return false
+        }
+        applyCond := meta.FindStatusCondition(resultWork.Status.Conditions, ConditionTypeApplied)
+        if applyCond == nil || applyCond.Status != metav1.ConditionTrue || applyCond.ObservedGeneration != resultWork.Generation {
+            return false
+        }
+        return true
+    }, timeout, interval).Should(BeTrue())
+    return &resultWork
+}
diff --git a/pkg/controllers/apply_controller_integratoin_test.go b/pkg/controllers/apply_controller_integratoin_test.go
deleted file mode 100644
index d8beb95..0000000
--- a/pkg/controllers/apply_controller_integratoin_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
-    "context"
-    "fmt"
-    "time"
-
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
-
-    corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/meta"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/types"
-    utilrand "k8s.io/apimachinery/pkg/util/rand"
-
-    workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
-)
-
-var _ = Describe("Work Controller", func() {
-    var workNamespace string
-    const timeout = time.Second * 30
-    const interval = time.Second * 1
-
-    BeforeEach(func() {
-        workNamespace = "work-" + utilrand.String(5)
-        // Create namespace
-        ns := &corev1.Namespace{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: workNamespace,
-            },
-        }
-        _, err := k8sClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
-        Expect(err).ToNot(HaveOccurred())
-    })
-
-    AfterEach(func() {
-        // Add any teardown steps that needs to be executed after each test
-        err := k8sClient.CoreV1().Namespaces().Delete(context.Background(), workNamespace, metav1.DeleteOptions{})
-        Expect(err).ToNot(HaveOccurred())
-    })
-
-    Context("Deploy manifests by work", func() {
-        It("Should have a configmap deployed correctly", func() {
-            cmName := "testcm"
-            cmNamespace := "default"
-            cm := &corev1.ConfigMap{
-                TypeMeta: metav1.TypeMeta{
-                    APIVersion: "v1",
-                    Kind:       "ConfigMap",
-                },
-                ObjectMeta: metav1.ObjectMeta{
-                    Name:      cmName,
-                    Namespace: cmNamespace,
-                },
-                Data: map[string]string{
-                    "test": "test",
-                },
-            }
-
-            work := &workv1alpha1.Work{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name:      "test-work",
-                    Namespace: workNamespace,
-                },
-                Spec: workv1alpha1.WorkSpec{
-                    Workload: workv1alpha1.WorkloadTemplate{
-                        Manifests: []workv1alpha1.Manifest{
-                            {
-                                RawExtension: runtime.RawExtension{Object: cm},
-                            },
-                        },
-                    },
-                },
-            }
-
-            err := workClient.Create(context.Background(), work)
-            Expect(err).ToNot(HaveOccurred())
-
-            Eventually(func() error {
-                _, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.Background(), cmName, metav1.GetOptions{})
-                return err
-            }, timeout, interval).Should(Succeed())
-
-            Eventually(func() error {
-                resultWork := workv1alpha1.Work{}
-                err := workClient.Get(context.Background(), types.NamespacedName{Name: work.GetName(), Namespace: workNamespace}, &resultWork)
-                if err != nil {
-                    return err
-                }
-                if len(resultWork.Status.ManifestConditions) != 1 {
-                    return fmt.Errorf("Expect the 1 manifest condition is updated")
-                }
-
-                if !meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, "Applied") {
-                    return fmt.Errorf("Exepect condition status of the manifest to be true")
-                }
-
-                if !meta.IsStatusConditionTrue(resultWork.Status.Conditions, "Applied") {
-                    return fmt.Errorf("Exepect condition status of the work to be true")
-                }
-                return nil
-            }, timeout, interval).Should(Succeed())
-        })
-    })
-})
diff --git a/pkg/controllers/suite_test.go b/pkg/controllers/suite_test.go
index c1339d3..657b18e 100644
--- a/pkg/controllers/suite_test.go
+++ b/pkg/controllers/suite_test.go
@@ -18,16 +18,16 @@ package controllers
 
 import (
     "context"
+    "flag"
     "os"
     "path/filepath"
     "testing"
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-    "k8s.io/client-go/dynamic"
-    "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/rest"
+    "k8s.io/klog/v2"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/envtest"
@@ -41,13 +41,11 @@ import (
 var (
     cfg *rest.Config
     // TODO: Seperate k8sClient into hub and spoke
-    k8sClient     kubernetes.Interface
-    workClient    client.Client
-    dynamicClient dynamic.Interface
-    testEnv       *envtest.Environment
-    setupLog      = ctrl.Log.WithName("test")
-    ctx           context.Context
-    cancel        context.CancelFunc
+    k8sClient client.Client
+    testEnv   *envtest.Environment
+    setupLog  = ctrl.Log.WithName("test")
+    ctx       context.Context
+    cancel    context.CancelFunc
 )
 
 func TestAPIs(t *testing.T) {
@@ -59,6 +57,11 @@
 }
 
 var _ = BeforeSuite(func(done Done) {
+    By("Setup klog")
+    fs := flag.NewFlagSet("klog", flag.ContinueOnError)
+    klog.InitFlags(fs)
+    Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed())
+
     By("bootstrapping test environment")
     testEnv = &envtest.Environment{
         CRDDirectoryPaths: []string{filepath.Join("../../", "config", "crd")},
@@ -72,20 +75,15 @@ var _ = BeforeSuite(func(done Done) {
     err = workv1alpha1.AddToScheme(scheme.Scheme)
     Expect(err).NotTo(HaveOccurred())
 
-    opts := ctrl.Options{
-        Scheme: scheme.Scheme,
-    }
-
-    k8sClient, err = kubernetes.NewForConfig(cfg)
-    Expect(err).NotTo(HaveOccurred())
-    workClient, err = client.New(cfg, client.Options{
+    k8sClient, err = client.New(cfg, client.Options{
         Scheme: scheme.Scheme,
     })
     Expect(err).NotTo(HaveOccurred())
-    dynamicClient, err = dynamic.NewForConfig(cfg)
-    Expect(err).NotTo(HaveOccurred())
 
     go func() {
+        opts := ctrl.Options{
+            Scheme: scheme.Scheme,
+        }
         ctx, cancel = context.WithCancel(context.Background())
         if err := Start(ctx, cfg, cfg, setupLog, opts); err != nil {
             setupLog.Error(err, "problem running controllers")
diff --git a/pkg/controllers/work_status_controller_integration_test.go b/pkg/controllers/work_status_controller_integration_test.go
index 95c6c67..fa5f914 100644
--- a/pkg/controllers/work_status_controller_integration_test.go
+++ b/pkg/controllers/work_status_controller_integration_test.go
@@ -20,15 +20,13 @@ import (
     "context"
     "time"
 
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/apimachinery/pkg/types"
-
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-
     corev1 "k8s.io/api/core/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
     utilrand "k8s.io/apimachinery/pkg/util/rand"
 
     workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
@@ -42,27 +40,29 @@ var _ = Describe("Work Status Reconciler", func() {
     const timeout = time.Second * 30
     const interval = time.Second * 1
 
+    var wns corev1.Namespace
+    var rns corev1.Namespace
     BeforeEach(func() {
-        workName = utilrand.String(5)
-        workNamespace = utilrand.String(5)
-        resourceName = utilrand.String(5)
+        workName = "work-" + utilrand.String(5)
+        workNamespace = "cluster-" + utilrand.String(5)
+        resourceName = "configmap-" + utilrand.String(5)
         resourceNamespace = utilrand.String(5)
-        wns := &corev1.Namespace{
+        wns = corev1.Namespace{
             ObjectMeta: metav1.ObjectMeta{
                 Name: workNamespace,
             },
         }
-        _, err := k8sClient.CoreV1().Namespaces().Create(context.Background(), wns, metav1.CreateOptions{})
+        err := k8sClient.Create(context.Background(), &wns)
         Expect(err).ToNot(HaveOccurred())
 
-        rns := &corev1.Namespace{
+        rns = corev1.Namespace{
             ObjectMeta: metav1.ObjectMeta{
                 Name: resourceNamespace,
             },
         }
-        _, err = k8sClient.CoreV1().Namespaces().Create(context.Background(), rns, metav1.CreateOptions{})
+        err = k8sClient.Create(context.Background(), &rns)
         Expect(err).ToNot(HaveOccurred())
 
         // Create the Work object with some type of Manifest resource.
@@ -96,14 +96,14 @@
             },
         }
 
-        createWorkErr := workClient.Create(context.Background(), work)
+        createWorkErr := k8sClient.Create(context.Background(), work)
         Expect(createWorkErr).ToNot(HaveOccurred())
 
         Eventually(func() bool {
             namespacedName := types.NamespacedName{Name: workName, Namespace: workNamespace}
             getAppliedWork := workv1alpha1.AppliedWork{}
-            err := workClient.Get(context.Background(), namespacedName, &getAppliedWork)
+            err := k8sClient.Get(context.Background(), namespacedName, &getAppliedWork)
             if err == nil {
                 return getAppliedWork.Spec.WorkName == workName
             }
@@ -113,51 +113,54 @@
     AfterEach(func() {
         // TODO: Ensure that all resources are being deleted.
-        err := k8sClient.CoreV1().Namespaces().Delete(context.Background(), workNamespace, metav1.DeleteOptions{})
+        err := k8sClient.Delete(context.Background(), &wns)
+        Expect(err).ToNot(HaveOccurred())
+
+        err = k8sClient.Delete(context.Background(), &rns)
         Expect(err).ToNot(HaveOccurred())
     })
 
     Context("Receives a request where a Work's manifest condition does not contain the metadata of an existing AppliedResourceMeta", func() {
         It("Should delete the resource from the spoke cluster", func() {
-            currentWork := workv1alpha1.Work{}
-            err := workClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &currentWork)
-            Expect(err).ToNot(HaveOccurred())
+            currentWork := waitForWorkToApply(workName, workNamespace)
 
-            currentWork.Status.ManifestConditions = []workv1alpha1.ManifestCondition{}
+            By("wait for the resource to propagate to the appliedWork")
+            appliedWork := workv1alpha1.AppliedWork{}
+            Eventually(func() bool {
+                err := k8sClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork)
+                Expect(err).ToNot(HaveOccurred())
+                return len(appliedWork.Status.AppliedResources) == 1
+            }, timeout, interval).Should(BeTrue())
 
-            err = workClient.Update(context.Background(), &currentWork)
-            Expect(err).ToNot(HaveOccurred())
+            By("Remove the resource from the works")
+            currentWork.Spec.Workload.Manifests = nil
+            Expect(k8sClient.Update(context.Background(), currentWork)).Should(Succeed())
 
             Eventually(func() bool {
-                gvr := schema.GroupVersionResource{
-                    Group:    "core",
-                    Version:  "v1",
-                    Resource: "ConfigMap",
-                }
-                _, err := dynamicClient.Resource(gvr).Namespace(resourceNamespace).Get(context.Background(), resourceName, metav1.GetOptions{})
-
-                return err != nil
+                var configMap corev1.ConfigMap
+                return apierrors.IsNotFound(k8sClient.Get(context.Background(), types.NamespacedName{Name: resourceName, Namespace: resourceNamespace}, &configMap))
             }, timeout, interval).Should(BeTrue())
         })
     })
 
-    Context("Receives a request where a Work's manifest condition exists, but there"+
-        " isn't a respective AppliedResourceMeta.", func() {
+
+    // TODO: rewrite this test.
+    Context("Receives a request where a Work's manifest condition exists, but there isn't a respective AppliedResourceMeta.", func() {
         It("should delete the AppliedResourceMeta from the respective AppliedWork status", func() {
             appliedWork := workv1alpha1.AppliedWork{}
             Eventually(func() error {
-                err := workClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &appliedWork)
+                err := k8sClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &appliedWork)
                 Expect(err).ToNot(HaveOccurred())
 
                 appliedWork.Status.AppliedResources = []workv1alpha1.AppliedResourceMeta{}
-                err = workClient.Update(context.Background(), &appliedWork)
+                err = k8sClient.Update(context.Background(), &appliedWork)
                 return err
             }, timeout, interval).ShouldNot(HaveOccurred())
 
             Eventually(func() bool {
                 namespacedName := types.NamespacedName{Name: workName, Namespace: workNamespace}
-                err := workClient.Get(context.Background(), namespacedName, &appliedWork)
+                err := k8sClient.Get(context.Background(), namespacedName, &appliedWork)
                 if err != nil {
                     return false
                 }
diff --git a/tests/e2e/apply_test.go b/tests/e2e/apply_test.go
index 640964f..2a46f1b 100644
--- a/tests/e2e/apply_test.go
+++ b/tests/e2e/apply_test.go
@@ -469,7 +469,7 @@ var MultipleWorkWithSameManifestContext = func(description string, manifestFiles
     })
 
-    It("should ignore the duplicate manifest", func() {
+    It("should apply both duplicate manifests", func() {
         By("creating the work one resources")
         Expect(createWork(workOne)).To(Succeed())