From bead9cad25b9e10b011efb493f5f9e0733e46840 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Mon, 15 Aug 2022 23:06:10 -0700 Subject: [PATCH 1/7] remove all constant --- pkg/controllers/apply_controller.go | 134 ++++++++---------- pkg/controllers/apply_controller_unit_test.go | 20 +-- pkg/controllers/finalize_controller.go | 57 ++------ pkg/controllers/work_status_controller.go | 53 +++---- pkg/utils/constants.go | 41 ------ pkg/utils/test_utils.go | 37 +++++ 6 files changed, 143 insertions(+), 199 deletions(-) delete mode 100644 pkg/utils/constants.go diff --git a/pkg/controllers/apply_controller.go b/pkg/controllers/apply_controller.go index 5e0a81d2..ba9de430 100644 --- a/pkg/controllers/apply_controller.go +++ b/pkg/controllers/apply_controller.go @@ -23,7 +23,6 @@ import ( "fmt" "time" - "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -44,11 +43,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "sigs.k8s.io/work-api/pkg/utils" ) const ( - messageWorkFinalizerMissing = "the Work resource has no finalizer yet, it will be added" + workFieldManagerName = "work-api-agent" ) // ApplyWorkReconciler reconciles a Work object @@ -84,33 +82,35 @@ type applyResult struct { // Reconcile implement the control loop logic for Work object. func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.InfoS("Work apply controller reconcile loop triggered.", "item", req.NamespacedName) - if !r.Joined { - klog.InfoS("work controller is not started yet") + klog.InfoS("work controller is not started yet", "work", req.NamespacedName) return ctrl.Result{RequeueAfter: time.Second * 5}, nil } + klog.InfoS("Work apply controller reconcile loop triggered.", "work", req.NamespacedName) work := &workv1alpha1.Work{} err := r.client.Get(ctx, req.NamespacedName, work) switch { case apierrors.IsNotFound(err): + klog.InfoS("the work resource is deleted", "work", req.NamespacedName) return ctrl.Result{}, nil case err != nil: + klog.ErrorS(err, "failed to retrieve the work", "work", req.NamespacedName) return ctrl.Result{}, err } kLogObjRef := klog.KObj(work) // do nothing if the finalizer is not present // it ensures all maintained resources will be cleaned once work is deleted + // TODO: merge the finalizer controller into this if !controllerutil.ContainsFinalizer(work, workFinalizer) { - klog.InfoS(messageWorkFinalizerMissing, work.Kind, kLogObjRef) + klog.InfoS("the work resource has no finalizer yet, it will be added", work.Kind, kLogObjRef) return ctrl.Result{}, nil } // leave the finalizer to clean up if !work.DeletionTimestamp.IsZero() { - klog.InfoS(utils.MessageResourceDeleting, work.Kind, kLogObjRef) + klog.InfoS("resource is in the process of being deleted", work.Kind, kLogObjRef) return ctrl.Result{}, nil } @@ -118,76 +118,34 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // The AppliedWork.Name shall match its respective Work resource. 
appliedWork := &workv1alpha1.AppliedWork{} if err := r.spokeClient.Get(ctx, types.NamespacedName{Name: kLogObjRef.Name}, appliedWork); err != nil { - klog.ErrorS(err, utils.MessageResourceRetrieveFailed, "AppliedWork", kLogObjRef.Name) - meta.SetStatusCondition(&work.Status.Conditions, generateWorkAvailableStatusCondition(metav1.ConditionFalse, work.Generation)) - - return ctrl.Result{}, errors.Wrap(err, fmt.Sprintf(utils.MessageResourceRetrieveFailed+", %s=%s", "AppliedWork", work.Name)) + klog.ErrorS(err, "failed to retrieve the appliedWork", "AppliedWork", kLogObjRef.Name) + return ctrl.Result{}, err } - + // Apply the manifests to the member cluster owner := metav1.OwnerReference{ APIVersion: workv1alpha1.GroupVersion.String(), Kind: workv1alpha1.AppliedWorkKind, Name: appliedWork.GetName(), UID: appliedWork.GetUID(), } - results := r.applyManifests(ctx, work.Spec.Workload.Manifests, owner) - var errs []error - - // Update manifestCondition based on the results. - var manifestConditions []workv1alpha1.ManifestCondition - for _, result := range results { - if result.err != nil { - errs = append(errs, result.err) - } - - appliedCondition := buildAppliedStatusCondition(result.err, result.updated, result.generation) - manifestCondition := workv1alpha1.ManifestCondition{ - Identifier: result.identifier, - Conditions: []metav1.Condition{appliedCondition}, - } - - foundmanifestCondition := findManifestConditionByIdentifier(result.identifier, work.Status.ManifestConditions) - if foundmanifestCondition != nil { - manifestCondition.Conditions = foundmanifestCondition.Conditions - meta.SetStatusCondition(&manifestCondition.Conditions, appliedCondition) - } - manifestConditions = append(manifestConditions, manifestCondition) - } - - work.Status.ManifestConditions = manifestConditions - - workCond := generateWorkAppliedStatusCondition(manifestConditions, work.Generation) - work.Status.Conditions = []metav1.Condition{workCond} - //Update available status condition of work - meta.SetStatusCondition(&work.Status.Conditions, generateWorkAvailableStatusCondition(workCond.Status, work.Generation)) + // generate the work condition based on the apply result + errs := r.generateWorkCondition(results, work) err = r.client.Status().Update(ctx, work, &client.UpdateOptions{}) if err != nil { - klog.ErrorS(err, utils.MessageResourceStatusUpdateFailed, work.Kind, kLogObjRef) errs = append(errs, err) - } else { - klog.InfoS(utils.MessageResourceStatusUpdateSucceeded, work.Kind, kLogObjRef) + klog.ErrorS(err, "failed to update work status", "work", kLogObjRef) } if len(errs) != 0 { - klog.InfoS(utils.MessageManifestApplyIncomplete, work.Kind, kLogObjRef) - r.recorder.Event( - work, - v1.EventTypeWarning, - utils.EventReasonReconcileIncomplete, - utils.MessageManifestApplyIncomplete) - + klog.InfoS("manifest apply incomplete; the message is queued again for reconciliation", + "work", kLogObjRef, "err", utilerrors.NewAggregate(errs)) return ctrl.Result{}, utilerrors.NewAggregate(errs) } - - r.recorder.Event( - work, - v1.EventTypeNormal, - utils.EventReasonReconcileComplete, - utils.MessageManifestApplyComplete) - + klog.InfoS("apply the work successfully ", "work", kLogObjRef) + r.recorder.Event(work, v1.EventTypeNormal, "ReconciliationComplete", "apply the work successfully") return ctrl.Result{RequeueAfter: time.Minute * 5}, nil } @@ -215,12 +173,12 @@ func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []wo if result.err == nil { result.generation = obj.GetGeneration() if result.updated { 
- klog.V(4).InfoS(utils.MessageManifestApplySucceeded, "gvr", gvr, "obj", kLogObjRef, "new ObservedGeneration", result.generation) + klog.V(4).InfoS("manifest apply succeeded", "gvr", gvr, "obj", kLogObjRef, "new ObservedGeneration", result.generation) } else { - klog.V(8).InfoS(utils.MessageManifestApplyUnwarranted, "gvr", gvr, "obj", kLogObjRef) + klog.V(5).InfoS("manifest apply unwarranted; the spec has not changed", "gvr", gvr, "obj", kLogObjRef) } } else { - klog.ErrorS(err, utils.MessageManifestApplyFailed, "gvr", gvr, "obj", kLogObjRef) + klog.ErrorS(err, "manifest apply failed", "gvr", gvr, "obj", kLogObjRef) } } results = append(results, result) @@ -258,13 +216,11 @@ func (r *ApplyWorkReconciler) applyUnstructured( return nil, false, err } - curObj, err := r.spokeDynamicClient. - Resource(gvr). - Namespace(manifestObj.GetNamespace()). + curObj, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). Get(ctx, manifestObj.GetName(), metav1.GetOptions{}) if apierrors.IsNotFound(err) { actual, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create( - ctx, manifestObj, metav1.CreateOptions{FieldManager: utils.WorkFieldManagerName}) + ctx, manifestObj, metav1.CreateOptions{FieldManager: workFieldManagerName}) return actual, true, err } @@ -273,8 +229,8 @@ func (r *ApplyWorkReconciler) applyUnstructured( } if !hasSharedOwnerReference(curObj.GetOwnerReferences(), manifestObj.GetOwnerReferences()[0]) { - err = errors.New(utils.MessageResourceStateInvalid) - klog.ErrorS(err, utils.MessageResourceNotOwnedByWorkAPI, "gvr", gvr, "obj", kLogObjRef) + err = fmt.Errorf("resource is not managed by the work controller") + klog.ErrorS(err, "skip applying the manifest", "gvr", gvr, "obj", kLogObjRef) return nil, false, err } @@ -282,7 +238,7 @@ func (r *ApplyWorkReconciler) applyUnstructured( // We only try to update the object if its spec hash value has changed. if manifestObj.GetAnnotations()[specHashAnnotation] != curObj.GetAnnotations()[specHashAnnotation] { klog.V(5).InfoS( - utils.MessageResourceSpecModified, "gvr", gvr, "obj", kLogObjRef, + "resource spec is modified", "gvr", gvr, "obj", kLogObjRef, "new hash", manifestObj.GetAnnotations()[specHashAnnotation], "existing hash", curObj.GetAnnotations()[specHashAnnotation]) @@ -292,19 +248,18 @@ func (r *ApplyWorkReconciler) applyUnstructured( newData, err := manifestObj.MarshalJSON() if err != nil { - klog.ErrorS(err, utils.MessageResourceJSONMarshalFailed, "gvr", gvr, "obj", kLogObjRef) + klog.ErrorS(err, "failed to JSON marshall manifest", "gvr", gvr, "obj", kLogObjRef) return nil, false, err } // Use sever-side apply to be safe. actual, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). 
Patch(ctx, manifestObj.GetName(), types.ApplyPatchType, newData, - metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: utils.WorkFieldManagerName}) + metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: workFieldManagerName}) if err != nil { - klog.ErrorS(err, utils.MessageResourcePatchFailed, "gvr", gvr, "obj", kLogObjRef) - + klog.ErrorS(err, "failed to patch the manifest", "gvr", gvr, "obj", kLogObjRef) return nil, false, err } - klog.V(5).InfoS(utils.MessageResourcePatchSucceeded, "gvr", gvr, "obj", kLogObjRef) + klog.V(4).InfoS("resource patch succeeded", "gvr", gvr, "obj", kLogObjRef) return actual, true, err } @@ -312,6 +267,35 @@ func (r *ApplyWorkReconciler) applyUnstructured( return curObj, false, nil } +// generateWorkCondition constructs the work condition based on the apply result +func (r *ApplyWorkReconciler) generateWorkCondition(results []applyResult, work *workv1alpha1.Work) []error { + var errs []error + // Update manifestCondition based on the results. + var manifestConditions []workv1alpha1.ManifestCondition + for _, result := range results { + if result.err != nil { + errs = append(errs, result.err) + } + + appliedCondition := buildAppliedStatusCondition(result.err, result.updated, result.generation) + manifestCondition := workv1alpha1.ManifestCondition{ + Identifier: result.identifier, + Conditions: []metav1.Condition{appliedCondition}, + } + foundmanifestCondition := findManifestConditionByIdentifier(result.identifier, work.Status.ManifestConditions) + if foundmanifestCondition != nil { + manifestCondition.Conditions = foundmanifestCondition.Conditions + meta.SetStatusCondition(&manifestCondition.Conditions, appliedCondition) + } + manifestConditions = append(manifestConditions, manifestCondition) + } + + work.Status.ManifestConditions = manifestConditions + workCond := generateWorkAppliedStatusCondition(manifestConditions, work.Generation) + work.Status.Conditions = []metav1.Condition{workCond} + return errs +} + // SetupWithManager wires up the controller. func (r *ApplyWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). diff --git a/pkg/controllers/apply_controller_unit_test.go b/pkg/controllers/apply_controller_unit_test.go index 2d457d46..aca72e57 100644 --- a/pkg/controllers/apply_controller_unit_test.go +++ b/pkg/controllers/apply_controller_unit_test.go @@ -83,6 +83,7 @@ func (m testMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta. 
} func TestApplyManifest(t *testing.T) { + failMsg := "manifest apply failed" // Manifests rawInvalidResource, _ := json.Marshal([]byte(rand.String(10))) rawMissingResource, _ := json.Marshal( @@ -110,7 +111,7 @@ func TestApplyManifest(t *testing.T) { // DynamicClients clientFailDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) clientFailDynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New(utils.MessageManifestApplyFailed) + return true, nil, errors.New(failMsg) }) testCases := map[string]struct { @@ -182,7 +183,7 @@ func TestApplyManifest(t *testing.T) { generation: 0, updated: false, wantGvr: expectedGvr, - wantErr: errors.New(utils.MessageManifestApplyFailed), + wantErr: errors.New(failMsg), }, } @@ -318,7 +319,7 @@ func TestApplyUnstructured(t *testing.T) { }, workObj: correctObj.DeepCopy(), resultBool: false, - resultErr: errors.New(utils.MessageResourceStateInvalid), + resultErr: errors.New("resource is not managed by the work controller"), }, "equal spec hash of current vs work object / succeed without updates": { reconciler: ApplyWorkReconciler{ @@ -379,6 +380,7 @@ func TestApplyUnstructured(t *testing.T) { } func TestReconcile(t *testing.T) { + failMsg := "manifest apply failed" workNamespace := rand.String(10) workName := rand.String(10) appliedWorkName := rand.String(10) @@ -470,7 +472,7 @@ func TestReconcile(t *testing.T) { clientFailDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) clientFailDynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New(utils.MessageManifestApplyFailed) + return true, nil, errors.New(failMsg) }) testCases := map[string]struct { @@ -490,7 +492,7 @@ func TestReconcile(t *testing.T) { }, req: req, wantErr: nil, - requeue: false, + requeue: true, }, "work cannot be retrieved, client failed due to client error": { reconciler: ApplyWorkReconciler{ @@ -587,7 +589,7 @@ func TestReconcile(t *testing.T) { Joined: true, }, req: req, - wantErr: errors.New(utils.MessageResourceRetrieveFailed), + wantErr: errors.New("manifest apply unwarranted; the spec has not changed"), }, "ApplyManifest fails": { reconciler: ApplyWorkReconciler{ @@ -609,14 +611,14 @@ func TestReconcile(t *testing.T) { Joined: true, }, req: req, - wantErr: errors.New(utils.MessageManifestApplyFailed), + wantErr: errors.New(failMsg), }, "client update fails": { reconciler: ApplyWorkReconciler{ client: &test.MockClient{ MockGet: getMock, MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { - return errors.New(utils.MessageResourceStatusUpdateFailed) + return errors.New("failed") }, }, spokeDynamicClient: clientFailDynamicClient, @@ -628,7 +630,7 @@ func TestReconcile(t *testing.T) { Joined: true, }, req: req, - wantErr: errors.New(utils.MessageResourceStatusUpdateFailed), + wantErr: errors.New("failed"), }, "Happy Path": { reconciler: ApplyWorkReconciler{ diff --git a/pkg/controllers/finalize_controller.go b/pkg/controllers/finalize_controller.go index 0d6270ea..10789afb 100644 --- a/pkg/controllers/finalize_controller.go +++ b/pkg/controllers/finalize_controller.go @@ -20,7 +20,6 @@ import ( "context" "time" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -33,11 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" 
workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "sigs.k8s.io/work-api/pkg/utils" -) - -const ( - messageAppliedWorkFinalizerNotFound = "AppliedWork finalizer object does not exist yet, it will be created" ) // FinalizeWorkReconciler reconciles a Work object for finalization @@ -59,12 +53,11 @@ func NewFinalizeWorkReconciler(hubClient client.Client, spokeClient client.Clien // Reconcile implement the control loop logic for finalizing Work object. func (r *FinalizeWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.InfoS("Work finalize controller reconcile loop triggered.", "item", req.NamespacedName) - if !r.Joined { - klog.InfoS("finalize controller is not started yet") + klog.InfoS("finalize controller is not started yet", "work", req.NamespacedName) return ctrl.Result{RequeueAfter: time.Second * 5}, nil } + klog.InfoS("Work finalize controller reconcile loop triggered.", "work", req.NamespacedName) work := &workv1alpha1.Work{} err := r.client.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, work) @@ -72,6 +65,7 @@ func (r *FinalizeWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request case errors.IsNotFound(err): return ctrl.Result{}, nil case err != nil: + klog.ErrorS(err, "fail to retrieve the work resource", "work", req.NamespacedName) return ctrl.Result{}, err } @@ -85,20 +79,16 @@ func (r *FinalizeWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request appliedWork := &workv1alpha1.AppliedWork{} if controllerutil.ContainsFinalizer(work, workFinalizer) { err = r.spokeClient.Get(ctx, req.NamespacedName, appliedWork) - if err != nil { - if errors.IsNotFound(err) { - klog.ErrorS(err, messageAppliedWorkFinalizerNotFound, "AppliedWork", kLogObjRef.Name) - } else { - klog.ErrorS(err, utils.MessageResourceRetrieveFailed, "AppliedWork", kLogObjRef.Name) - return ctrl.Result{}, err - } - } else { - // everything is fine, don't need to do anything - return ctrl.Result{}, nil + switch { + case errors.IsNotFound(err): + klog.ErrorS(err, "appliedWork finalizer resource does not exist yet, it will be created", "AppliedWork", kLogObjRef.Name) + case err != nil: + klog.ErrorS(err, "failed to retrieve the appliedWork", "AppliedWork", kLogObjRef.Name) + return ctrl.Result{}, err } } - klog.InfoS(messageAppliedWorkFinalizerNotFound, "AppliedWork", kLogObjRef.Name) + klog.InfoS("create the appliedWork resource", "AppliedWork", kLogObjRef.Name) appliedWork = &workv1alpha1.AppliedWork{ ObjectMeta: metav1.ObjectMeta{ Name: req.Name, @@ -111,23 +101,11 @@ func (r *FinalizeWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request err = r.spokeClient.Create(ctx, appliedWork) if err != nil && !errors.IsAlreadyExists(err) { // if this conflicts, we'll simply try again later - klog.ErrorS(err, utils.MessageResourceCreateFailed, "AppliedWork", kLogObjRef.Name) + klog.ErrorS(err, "appliedWork create failed", "AppliedWork", kLogObjRef.Name) return ctrl.Result{}, err } - r.recorder.Event(appliedWork, corev1.EventTypeNormal, utils.EventReasonAppliedWorkCreated, utils.MessageResourceCreateSucceeded) - - r.recorder.Event(work, corev1.EventTypeNormal, utils.EventReasonAppliedWorkCreated, "AppliedWork resource was created") work.Finalizers = append(work.Finalizers, workFinalizer) - if err = r.client.Update(ctx, work, &client.UpdateOptions{}); err == nil { - r.recorder.Eventf( - work, - corev1.EventTypeNormal, - utils.EventReasonFinalizerAdded, - utils.MessageResourceFinalizerAdded+", finalizer=%s", - workFinalizer) - } - 
return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) } @@ -136,24 +114,19 @@ func (r *FinalizeWorkReconciler) garbageCollectAppliedWork(ctx context.Context, if controllerutil.ContainsFinalizer(work, workFinalizer) { deletePolicy := metav1.DeletePropagationForeground appliedWork := workv1alpha1.AppliedWork{} - err := r.spokeClient.Get(ctx, types.NamespacedName{ - Name: work.Name, - }, &appliedWork) + err := r.spokeClient.Get(ctx, types.NamespacedName{Name: work.Name}, &appliedWork) if err != nil { - klog.ErrorS(err, utils.MessageResourceRetrieveFailed, "AppliedWork", work.Name) + klog.ErrorS(err, "failed to retrieve the appliedWork", "AppliedWork", work.Name) return ctrl.Result{}, err } err = r.spokeClient.Delete(ctx, &appliedWork, &client.DeleteOptions{PropagationPolicy: &deletePolicy}) if err != nil { - klog.ErrorS(err, utils.MessageResourceDeleteFailed, "AppliedWork", work.Name) + klog.ErrorS(err, "failed to delete the appliedWork", "AppliedWork", work.Name) return ctrl.Result{}, err } - - r.recorder.Eventf(work, corev1.EventTypeNormal, utils.EventReasonAppliedWorkDeleted, utils.MessageResourceDeleteSucceeded+", AppliedWork=%s", work.Name) - klog.InfoS(utils.MessageResourceDeleteSucceeded, "AppliedWork", work.Name) + klog.InfoS("successfully deleted the appliedWork", "AppliedWork", work.Name) controllerutil.RemoveFinalizer(work, workFinalizer) - r.recorder.Eventf(work, corev1.EventTypeNormal, utils.EventReasonFinalizerRemoved, utils.MessageResourceFinalizerRemoved+", finalizer="+workFinalizer) } return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) diff --git a/pkg/controllers/work_status_controller.go b/pkg/controllers/work_status_controller.go index 6e452696..e3d22352 100644 --- a/pkg/controllers/work_status_controller.go +++ b/pkg/controllers/work_status_controller.go @@ -18,7 +18,7 @@ package controllers import ( "context" - "github.com/pkg/errors" + "fmt" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -37,7 +37,6 @@ import ( "time" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "sigs.k8s.io/work-api/pkg/utils" ) // WorkStatusReconciler reconciles a Work object when its status changes @@ -83,22 +82,21 @@ func (r *WorkStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) // from now on both work objects should exist newRes, staleRes := r.calculateNewAppliedWork(work, appliedWork) - if err = r.deleteStaleWork(ctx, staleRes); err != nil { - klog.ErrorS(err, utils.MessageResourceGarbageCollectionIncomplete, work.Kind, kLogObjRef) - r.recorder.Event(work, v1.EventTypeWarning, utils.EventReasonResourceGarbageCollectionIncomplete, utils.MessageResourceGarbageCollectionIncomplete) - + if err = r.deleteStaleManifest(ctx, staleRes); err != nil { + klog.ErrorS(err, "resource garbage-collection incomplete; some Work owned resources could not be deleted", work.Kind, kLogObjRef) // we can't proceed to update the applied return ctrl.Result{}, err } else if len(staleRes) > 0 && err == nil { // TODO: Specify which manifest was deleted. 
- klog.InfoS(utils.MessageResourceGarbageCollectionComplete, work.Kind, kLogObjRef) - r.recorder.Event(work, v1.EventTypeNormal, utils.EventReasonResourceGarbageCollectionComplete, utils.MessageResourceGarbageCollectionComplete) + msg := "successfully garbage-collected all stale manifests" + klog.InfoS(msg, work.Kind, kLogObjRef) + r.recorder.Event(work, v1.EventTypeNormal, "ResourceGarbageCollectionComplete", msg) } // update the appliedWork with the new work appliedWork.Status.AppliedResources = newRes if err = r.spokeClient.Status().Update(ctx, appliedWork, &client.UpdateOptions{}); err != nil { - klog.ErrorS(err, utils.MessageResourceStatusUpdateFailed, appliedWork.Kind, appliedWork.GetName()) + klog.ErrorS(err, "failed to update appliedWork status", appliedWork.Kind, appliedWork.GetName()) return ctrl.Result{}, err } @@ -121,11 +119,8 @@ func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appli } } if !resStillExist { - klog.V(3).InfoS(utils.MessageResourceIsOrphan, - "parent GVK", work.GetObjectKind().GroupVersionKind(), - "parent resource", work.GetName(), - "orphan resource", resourceMeta) - + klog.V(3).InfoS("find an orphaned resource in the member cluster", + "parent resource", work.GetName(), "orphaned resource", resourceMeta.ResourceIdentifier) staleRes = append(staleRes, resourceMeta) } } @@ -133,10 +128,7 @@ func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appli for _, manifestCond := range work.Status.ManifestConditions { ac := meta.FindStatusCondition(manifestCond.Conditions, ConditionTypeApplied) if ac == nil { - err := errors.New(utils.MessageResourceStateInvalid) - klog.ErrorS(err, utils.MessageResourceIsMissingCondition, - "resource", manifestCond.Identifier, - "missing condition", ConditionTypeApplied) + klog.ErrorS(fmt.Errorf("resource is missing applied condition"), "applied condition missing", "resource", manifestCond.Identifier) continue } // we only add the applied one to the appliedWork status @@ -151,11 +143,8 @@ func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appli } } if !resRecorded { - klog.V(5).InfoS(utils.MessageResourceDiscovered, - "parent GVK", work.GetObjectKind().GroupVersionKind(), - "parent Work", work.GetName(), - "discovered resource", manifestCond.Identifier) - + klog.V(5).InfoS("discovered a new resource", + "parent Work", work.GetName(), "discovered resource", manifestCond.Identifier) newRes = append(newRes, workapi.AppliedResourceMeta{ ResourceIdentifier: manifestCond.Identifier, }) @@ -166,19 +155,19 @@ func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appli return newRes, staleRes } -func (r *WorkStatusReconciler) deleteStaleWork(ctx context.Context, staleWorks []workapi.AppliedResourceMeta) error { +func (r *WorkStatusReconciler) deleteStaleManifest(ctx context.Context, staleManifests []workapi.AppliedResourceMeta) error { var errs []error - for _, staleWork := range staleWorks { + for _, staleManifest := range staleManifests { gvr := schema.GroupVersionResource{ - Group: staleWork.Group, - Version: staleWork.Version, - Resource: staleWork.Resource, + Group: staleManifest.Group, + Version: staleManifest.Version, + Resource: staleManifest.Resource, } - err := r.spokeDynamicClient.Resource(gvr).Namespace(staleWork.Namespace). 
- Delete(ctx, staleWork.Name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsGone(err) { - klog.ErrorS(err, utils.MessageResourceDeleteFailed, "resource", staleWork) + err := r.spokeDynamicClient.Resource(gvr).Namespace(staleManifest.Namespace). + Delete(ctx, staleManifest.Name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + klog.ErrorS(err, "failed to delete the staled manifest", "manifest", staleManifest) errs = append(errs, err) } } diff --git a/pkg/utils/constants.go b/pkg/utils/constants.go deleted file mode 100644 index cf01b4d9..00000000 --- a/pkg/utils/constants.go +++ /dev/null @@ -1,41 +0,0 @@ -package utils - -const ( - WorkFieldManagerName = "work-api-agent" - - EventReasonAppliedWorkCreated = "AppliedWorkCreated" - EventReasonAppliedWorkDeleted = "AppliedWorkDeleted" - EventReasonFinalizerAdded = "FinalizerAdded" - EventReasonFinalizerRemoved = "FinalizerRemoved" - EventReasonReconcileComplete = "ReconciliationComplete" - EventReasonReconcileIncomplete = "ReconciliationIncomplete" - EventReasonResourceGarbageCollectionComplete = "ResourceGarbageCollectionComplete" - EventReasonResourceGarbageCollectionIncomplete = "ResourceGarbageCollectionIncomplete" - - MessageManifestApplyFailed = "manifest apply failed" - MessageManifestApplyComplete = "manifest apply completed" - MessageManifestApplyIncomplete = "manifest apply incomplete; the respective Work will be queued again for reconciliation" - MessageManifestApplySucceeded = "manifest apply succeeded" - MessageManifestApplyUnwarranted = "manifest apply unwarranted; the spec has not changed" - MessageResourceFinalizerRemoved = "resource's finalizer removed" - MessageResourceCreateSucceeded = "resource create succeeded" - MessageResourceCreateFailed = "resource create failed" - MessageResourceDeleteSucceeded = "resource delete succeeded" - MessageResourceDeleting = "resource is in the process of being deleted" - MessageResourceDeleteFailed = "resource delete failed" - MessageResourceDiscovered = "resource discovered" - MessageResourceFinalizerAdded = "resource finalizer added" - MessageResourceGarbageCollectionComplete = "resource garbage-collection complete" - MessageResourceGarbageCollectionIncomplete = "resource garbage-collection incomplete; some Work owned resources could not be deleted" - MessageResourceIsOrphan = "resource is an orphan" - MessageResourceIsMissingCondition = "resource is missing condition" - MessageResourceJSONMarshalFailed = "resource JSON marshaling failed" - MessageResourceNotOwnedByWorkAPI = "resource not owned by Work-API" - MessageResourcePatchFailed = "resource patch failed" - MessageResourcePatchSucceeded = "resource patch succeeded" - MessageResourceRetrieveFailed = "resource retrieval failed" - MessageResourceStateInvalid = "resource state is invalid" - MessageResourceSpecModified = "resource spec modified" - MessageResourceStatusUpdateFailed = "resource status update failed" - MessageResourceStatusUpdateSucceeded = "resource status update succeeded" -) diff --git a/pkg/utils/test_utils.go b/pkg/utils/test_utils.go index 406e9c89..102e67d1 100644 --- a/pkg/utils/test_utils.go +++ b/pkg/utils/test_utils.go @@ -3,6 +3,8 @@ package utils import ( "github.com/onsi/gomega/format" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/record" ) @@ -35,3 +37,38 @@ func (matcher AlreadyExistMatcher) FailureMessage(actual interface{}) (message s func (matcher 
AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to be already exist") } + +func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) + } else { + owners[idx] = ref + } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. +func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if referSameObject(r, ref) { + return index + } + } + return -1 +} + +// Returns true if a and b point to the same object. +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name +} From bd375d55804b8a3b7b67a378e81c1412eb714637 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 17 Aug 2022 00:14:12 -0700 Subject: [PATCH 2/7] refactor code --- README.md | 2 +- cmd/workcontroller/workcontroller.go | 8 +- examples/example-work-copy.yaml | 51 + examples/example-work.yaml | 2 + pkg/controllers/apply_controller.go | 347 ++++--- .../apply_controller_integratoin_test.go | 123 +++ pkg/controllers/apply_controller_test.go | 928 ++++++++++++++++-- pkg/controllers/apply_controller_unit_test.go | 684 ------------- ...it_test.go => finalize_controller_test.go} | 0 pkg/controllers/manager.go | 14 +- pkg/controllers/resource_tracker.go | 13 +- pkg/controllers/suite_test.go | 4 - pkg/controllers/work_status_controller.go | 74 +- ...test.go => work_status_controller_test.go} | 16 +- pkg/utils/common.go | 42 + pkg/utils/test_utils.go | 37 - 16 files changed, 1316 insertions(+), 1029 deletions(-) create mode 100644 examples/example-work-copy.yaml create mode 100644 pkg/controllers/apply_controller_integratoin_test.go delete mode 100644 pkg/controllers/apply_controller_unit_test.go rename pkg/controllers/{finalize_controller_unit_test.go => finalize_controller_test.go} (100%) rename pkg/controllers/{work_status_controller_unit_test.go => work_status_controller_test.go} (91%) create mode 100644 pkg/utils/common.go diff --git a/README.md b/README.md index 026c64f8..9189bc86 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ kubectl apply -k deploy ```shell kubectl delete secret hub-kubeconfig-secret -n fleet-system kubectl create secret generic hub-kubeconfig-secret --from-file=kubeconfig=/Users/ryanzhang/.kube/hub -n fleet-system -go run cmd/workcontroller/workcontroller.go --work-namespace=default --hub-secret=hub-kubeconfig-secret +go run cmd/workcontroller/workcontroller.go --work-namespace=default --hub-kubeconfig-secret=hub-kubeconfig-secret ``` diff --git a/cmd/workcontroller/workcontroller.go b/cmd/workcontroller/workcontroller.go index 194251a3..b9fcadf7 100644 --- a/cmd/workcontroller/workcontroller.go +++ b/cmd/workcontroller/workcontroller.go @@ -31,8 +31,6 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/work-api/pkg/apis/v1alpha1" "sigs.k8s.io/work-api/pkg/controllers" "sigs.k8s.io/work-api/version" @@ -72,8 +70,10 @@ func main() { flag.IntVar(&concurrentReconciles, "concurrency", 5, "max work 
reconciler concurrency") flag.Parse() - defer klog.Flush() + flag.VisitAll(func(f *flag.Flag) { + klog.InfoS("flag:", "name", f.Name, "value", f.Value) + }) opts := ctrl.Options{ Scheme: scheme, @@ -85,7 +85,7 @@ func main() { HealthProbeBindAddress: healthAddr, Namespace: workNamespace, } - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + var hubConfig *restclient.Config var err error diff --git a/examples/example-work-copy.yaml b/examples/example-work-copy.yaml new file mode 100644 index 00000000..228a3cd8 --- /dev/null +++ b/examples/example-work-copy.yaml @@ -0,0 +1,51 @@ +apiVersion: multicluster.x-k8s.io/v1alpha1 +kind: Work +metadata: + name: test-work-2 + namespace: default +spec: + workload: + manifests: + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: test-nginx + namespace: default + labels: + service-name: test-nginx-export + spec: + replicas: 2 + selector: + matchLabels: + app: test-nginx + strategy: + rollingUpdate: + maxSurge: 50% + maxUnavailable: 50% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: test-nginx + spec: + containers: + - image: nginx:1.14.2 + name: nginx + ports: + - containerPort: 80 + resources: {} + - apiVersion: v1 + kind: Service + metadata: + labels: + run: test-nginx + name: test-nginx + namespace: default + spec: + ports: + - port: 80 + protocol: TCP + targetPort: 0 + selector: + run: test-nginx diff --git a/examples/example-work.yaml b/examples/example-work.yaml index 9b19744d..0e7eadb9 100644 --- a/examples/example-work.yaml +++ b/examples/example-work.yaml @@ -11,6 +11,8 @@ spec: metadata: name: test-nginx namespace: default + labels: + service-name: test-nginx-export spec: replicas: 2 selector: diff --git a/pkg/controllers/apply_controller.go b/pkg/controllers/apply_controller.go index ba9de430..c003a33d 100644 --- a/pkg/controllers/apply_controller.go +++ b/pkg/controllers/apply_controller.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/json" "fmt" + "sigs.k8s.io/work-api/pkg/utils" "time" v1 "k8s.io/api/core/v1" @@ -57,7 +58,7 @@ type ApplyWorkReconciler struct { restMapper meta.RESTMapper recorder record.EventRecorder concurrency int - Joined bool + joined bool } func NewApplyWorkReconciler(hubClient client.Client, spokeDynamicClient dynamic.Interface, spokeClient client.Client, restMapper meta.RESTMapper, recorder record.EventRecorder, concurrency int, joined bool) *ApplyWorkReconciler { @@ -68,7 +69,7 @@ func NewApplyWorkReconciler(hubClient client.Client, spokeDynamicClient dynamic. restMapper: restMapper, recorder: recorder, concurrency: concurrency, - Joined: joined, + joined: joined, } } @@ -82,17 +83,18 @@ type applyResult struct { // Reconcile implement the control loop logic for Work object. 
func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - if !r.Joined { - klog.InfoS("work controller is not started yet", "work", req.NamespacedName) + if !r.joined { + klog.V(3).InfoS("work controller is not started yet, requeue the request", "work", req.NamespacedName) return ctrl.Result{RequeueAfter: time.Second * 5}, nil } - klog.InfoS("Work apply controller reconcile loop triggered.", "work", req.NamespacedName) + klog.InfoS("work apply controller reconcile loop triggered.", "work", req.NamespacedName) + // Fetch the work resource work := &workv1alpha1.Work{} err := r.client.Get(ctx, req.NamespacedName, work) switch { case apierrors.IsNotFound(err): - klog.InfoS("the work resource is deleted", "work", req.NamespacedName) + klog.V(4).InfoS("the work resource is deleted", "work", req.NamespacedName) return ctrl.Result{}, nil case err != nil: klog.ErrorS(err, "failed to retrieve the work", "work", req.NamespacedName) @@ -100,41 +102,33 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } kLogObjRef := klog.KObj(work) - // do nothing if the finalizer is not present - // it ensures all maintained resources will be cleaned once work is deleted - // TODO: merge the finalizer controller into this - if !controllerutil.ContainsFinalizer(work, workFinalizer) { - klog.InfoS("the work resource has no finalizer yet, it will be added", work.Kind, kLogObjRef) - return ctrl.Result{}, nil - } - - // leave the finalizer to clean up + // Handle deleting work, garbage collect the resources if !work.DeletionTimestamp.IsZero() { - klog.InfoS("resource is in the process of being deleted", work.Kind, kLogObjRef) - return ctrl.Result{}, nil + klog.V(2).InfoS("resource is in the process of being deleted", work.Kind, kLogObjRef) + return r.garbageCollectAppliedWork(ctx, work) } - // We created the AppliedWork before setting the finalizer, it should already exist. - // The AppliedWork.Name shall match its respective Work resource. 
- appliedWork := &workv1alpha1.AppliedWork{} - if err := r.spokeClient.Get(ctx, types.NamespacedName{Name: kLogObjRef.Name}, appliedWork); err != nil { - klog.ErrorS(err, "failed to retrieve the appliedWork", "AppliedWork", kLogObjRef.Name) + // Get the appliedWork + appliedWork, err := r.ensureAppliedWork(ctx, work) + if err != nil { return ctrl.Result{}, err } - // Apply the manifests to the member cluster owner := metav1.OwnerReference{ - APIVersion: workv1alpha1.GroupVersion.String(), - Kind: workv1alpha1.AppliedWorkKind, - Name: appliedWork.GetName(), - UID: appliedWork.GetUID(), + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: workv1alpha1.AppliedWorkKind, + Name: appliedWork.GetName(), + UID: appliedWork.GetUID(), + BlockOwnerDeletion: pointer.Bool(false), } + + // Apply the manifests to the member cluster results := r.applyManifests(ctx, work.Spec.Workload.Manifests, owner) - // generate the work condition based on the apply result + // generate the work condition based on the manifest apply result errs := r.generateWorkCondition(results, work) - err = r.client.Status().Update(ctx, work, &client.UpdateOptions{}) - if err != nil { + // update the work status + if err = r.client.Status().Update(ctx, work, &client.UpdateOptions{}); err != nil { errs = append(errs, err) klog.ErrorS(err, "failed to update work status", "work", kLogObjRef) } @@ -146,12 +140,81 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } klog.InfoS("apply the work successfully ", "work", kLogObjRef) r.recorder.Event(work, v1.EventTypeNormal, "ReconciliationComplete", "apply the work successfully") + // we periodically reconcile the work to make sure the member cluster state is in sync with the work return ctrl.Result{RequeueAfter: time.Minute * 5}, nil } +// garbageCollectAppliedWork deletes the appliedWork and all the manifests associated with it from the cluster. +func (r *ApplyWorkReconciler) garbageCollectAppliedWork(ctx context.Context, work *workv1alpha1.Work) (ctrl.Result, error) { + deletePolicy := metav1.DeletePropagationBackground + if !controllerutil.ContainsFinalizer(work, workFinalizer) { + return ctrl.Result{}, nil + } + // delete the appliedWork which will remove all the manifests associated with it + // TODO: allow orphaned manifest + appliedWork := workv1alpha1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{Name: work.Name}, + } + err := r.spokeClient.Delete(ctx, &appliedWork, &client.DeleteOptions{PropagationPolicy: &deletePolicy}) + switch { + case apierrors.IsNotFound(err): + klog.V(4).InfoS("the appliedWork is already deleted", "appliedWork", work.Name) + case err != nil: + klog.ErrorS(err, "failed to delete the appliedWork", "appliedWork", work.Name) + return ctrl.Result{}, err + default: + klog.InfoS("successfully deleted the appliedWork", "appliedWork", work.Name) + } + controllerutil.RemoveFinalizer(work, workFinalizer) + return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) +} + +// ensureAppliedWork makes sure that an associated appliedWork and a finalizer on the work resource exsits on the cluster. 
+func (r *ApplyWorkReconciler) ensureAppliedWork(ctx context.Context, work *workv1alpha1.Work) (*workv1alpha1.AppliedWork, error) { + workRef := klog.KObj(work) + appliedWork := &workv1alpha1.AppliedWork{} + hasFinalizer := false + if controllerutil.ContainsFinalizer(work, workFinalizer) { + hasFinalizer = true + err := r.spokeClient.Get(ctx, types.NamespacedName{Name: work.Name}, appliedWork) + switch { + case apierrors.IsNotFound(err): + klog.ErrorS(err, "appliedWork finalizer resource does not exist even with the finalizer, it will be recreated", "appliedWork", workRef.Name) + case err != nil: + klog.ErrorS(err, "failed to retrieve the appliedWork ", "appliedWork", workRef.Name) + return nil, err + default: + return appliedWork, nil + } + } + + // we create the appliedWork before setting the finalizer, so it should always exist unless it's deleted behind our back + appliedWork = &workv1alpha1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: work.Name, + }, + Spec: workv1alpha1.AppliedWorkSpec{ + WorkName: work.Name, + WorkNamespace: work.Namespace, + }, + } + if err := r.spokeClient.Create(ctx, appliedWork); err != nil && !apierrors.IsAlreadyExists(err) { + klog.ErrorS(err, "appliedWork create failed", "appliedWork", workRef.Name) + return nil, err + } + if !hasFinalizer { + klog.InfoS("add the finalizer to the work", "work", workRef) + work.Finalizers = append(work.Finalizers, workFinalizer) + return appliedWork, r.client.Update(ctx, work, &client.UpdateOptions{}) + } + klog.InfoS("recreated the appliedWork resource", "appliedWork", workRef.Name) + return appliedWork, nil +} + // applyManifests processes a given set of Manifests by: setting ownership, validating the manifest, and passing it on for application to the cluster. func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []workv1alpha1.Manifest, owner metav1.OwnerReference) []applyResult { var results []applyResult + var appliedObj *unstructured.Unstructured for index, manifest := range manifests { result := applyResult{ @@ -161,24 +224,22 @@ func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []wo if err != nil { result.err = err } else { - var obj *unstructured.Unstructured + utils.UpsertOwnerRef(owner, rawObj) + appliedObj, result.updated, result.err = r.applyUnstructured(ctx, gvr, rawObj) result.identifier = buildResourceIdentifier(index, rawObj, gvr) kLogObjRef := klog.ObjectRef{ Name: result.identifier.Name, Namespace: result.identifier.Namespace, } - // TODO: add webhooks to block any manifest that has owner reference - rawObj.SetOwnerReferences([]metav1.OwnerReference{owner}) - obj, result.updated, result.err = r.applyUnstructured(ctx, gvr, rawObj) if result.err == nil { - result.generation = obj.GetGeneration() + result.generation = appliedObj.GetGeneration() if result.updated { - klog.V(4).InfoS("manifest apply succeeded", "gvr", gvr, "obj", kLogObjRef, "new ObservedGeneration", result.generation) + klog.V(4).InfoS("manifest upsert succeeded", "gvr", gvr, "manifest", kLogObjRef, "new ObservedGeneration", result.generation) } else { - klog.V(5).InfoS("manifest apply unwarranted; the spec has not changed", "gvr", gvr, "obj", kLogObjRef) + klog.V(5).InfoS("manifest upsert unwarranted", "gvr", gvr, "manifest", kLogObjRef) } } else { - klog.ErrorS(err, "manifest apply failed", "gvr", gvr, "obj", kLogObjRef) + klog.ErrorS(result.err, "manifest upsert failed", "gvr", gvr, "manifest", kLogObjRef) } } results = append(results, result) @@ -203,70 +264,70 @@ func (r 
*ApplyWorkReconciler) decodeManifest(manifest workv1alpha1.Manifest) (sc } // Determines if an unstructured manifest object can & should be applied. If so, it applies (creates) the resource on the cluster. -func (r *ApplyWorkReconciler) applyUnstructured( - ctx context.Context, - gvr schema.GroupVersionResource, - manifestObj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { - kLogObjRef := klog.ObjectRef{ +func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema.GroupVersionResource, manifestObj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + manifestRef := klog.ObjectRef{ Name: manifestObj.GetName(), Namespace: manifestObj.GetName(), } - err := setSpecHashAnnotation(manifestObj) - if err != nil { - return nil, false, err - } - - curObj, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). - Get(ctx, manifestObj.GetName(), metav1.GetOptions{}) + curObj, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Get(ctx, manifestObj.GetName(), metav1.GetOptions{}) if apierrors.IsNotFound(err) { - actual, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create( + actual, createErr := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create( ctx, manifestObj, metav1.CreateOptions{FieldManager: workFieldManagerName}) - - return actual, true, err + if createErr == nil || apierrors.IsAlreadyExists(createErr) { + klog.V(4).InfoS("successfully created the manfiest", "gvr", gvr, "manfiest", manifestRef) + return actual, true, nil + } + return nil, false, createErr } if err != nil { return nil, false, err } - if !hasSharedOwnerReference(curObj.GetOwnerReferences(), manifestObj.GetOwnerReferences()[0]) { + // check if the existing manifest is managed by the work + if !isManifestManagedByWork(curObj.GetOwnerReferences()) { err = fmt.Errorf("resource is not managed by the work controller") - klog.ErrorS(err, "skip applying the manifest", "gvr", gvr, "obj", kLogObjRef) - + klog.ErrorS(err, "skip applying a not managed manifest", "gvr", gvr, "obj", manifestRef) return nil, false, err } + err = setManifestHashAnnotation(manifestObj) + if err != nil { + return nil, false, err + } // We only try to update the object if its spec hash value has changed. - if manifestObj.GetAnnotations()[specHashAnnotation] != curObj.GetAnnotations()[specHashAnnotation] { - klog.V(5).InfoS( - "resource spec is modified", "gvr", gvr, "obj", kLogObjRef, - "new hash", manifestObj.GetAnnotations()[specHashAnnotation], - "existing hash", curObj.GetAnnotations()[specHashAnnotation]) - - manifestObj.SetAnnotations(mergeMapOverrideWithDst(curObj.GetAnnotations(), manifestObj.GetAnnotations())) - manifestObj.SetLabels(mergeMapOverrideWithDst(curObj.GetLabels(), manifestObj.GetLabels())) - manifestObj.SetOwnerReferences(mergeOwnerReference(curObj.GetOwnerReferences(), manifestObj.GetOwnerReferences())) - - newData, err := manifestObj.MarshalJSON() - if err != nil { - klog.ErrorS(err, "failed to JSON marshall manifest", "gvr", gvr, "obj", kLogObjRef) - return nil, false, err - } - // Use sever-side apply to be safe. - actual, err := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). 
- Patch(ctx, manifestObj.GetName(), types.ApplyPatchType, newData, - metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: workFieldManagerName}) - if err != nil { - klog.ErrorS(err, "failed to patch the manifest", "gvr", gvr, "obj", kLogObjRef) - return nil, false, err - } - klog.V(4).InfoS("resource patch succeeded", "gvr", gvr, "obj", kLogObjRef) - - return actual, true, err + if manifestObj.GetAnnotations()[manifestHashAnnotation] != curObj.GetAnnotations()[manifestHashAnnotation] { + return r.patchCurrentResource(ctx, gvr, manifestObj, curObj) } return curObj, false, nil } +// patchCurrentResource use server side apply to patch the current resource with the new manifest we get from the work. +func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr schema.GroupVersionResource, manifestObj, curObj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + manifestRef := klog.ObjectRef{ + Name: manifestObj.GetName(), + Namespace: manifestObj.GetName(), + } + klog.V(5).InfoS("manifest is modified", "gvr", gvr, "manifest", manifestRef, + "new hash", manifestObj.GetAnnotations()[manifestHashAnnotation], + "existing hash", curObj.GetAnnotations()[manifestHashAnnotation]) + newData, err := manifestObj.MarshalJSON() + if err != nil { + klog.ErrorS(err, "failed to JSON marshall manifest", "gvr", gvr, "manifest", manifestRef) + return nil, false, err + } + // Use sever-side apply to be safe. + actual, patchErr := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()). + Patch(ctx, manifestObj.GetName(), types.ApplyPatchType, newData, + metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: workFieldManagerName}) + if patchErr != nil { + klog.ErrorS(patchErr, "failed to patch the manifest", "gvr", gvr, "manifest", manifestRef) + return nil, false, patchErr + } + klog.V(3).InfoS("manifest patch succeeded", "gvr", gvr, "manifest", manifestRef) + return actual, true, nil +} + // generateWorkCondition constructs the work condition based on the apply result func (r *ApplyWorkReconciler) generateWorkCondition(results []applyResult, work *workv1alpha1.Work) []error { var errs []error @@ -276,8 +337,7 @@ func (r *ApplyWorkReconciler) generateWorkCondition(results []applyResult, work if result.err != nil { errs = append(errs, result.err) } - - appliedCondition := buildAppliedStatusCondition(result.err, result.updated, result.generation) + appliedCondition := buildManifestAppliedCondition(result.err, result.updated, result.generation) manifestCondition := workv1alpha1.ManifestCondition{ Identifier: result.identifier, Conditions: []metav1.Condition{appliedCondition}, @@ -291,7 +351,7 @@ func (r *ApplyWorkReconciler) generateWorkCondition(results []applyResult, work } work.Status.ManifestConditions = manifestConditions - workCond := generateWorkAppliedStatusCondition(manifestConditions, work.Generation) + workCond := generateWorkAppliedCondition(manifestConditions, work.Generation) work.Status.Conditions = []metav1.Condition{workCond} return errs } @@ -306,64 +366,49 @@ func (r *ApplyWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -// Generates a hash of the spec annotation from an unstructured object. -func generateSpecHash(obj *unstructured.Unstructured) (string, error) { - data := obj.DeepCopy().Object - delete(data, "metadata") +// Generates a hash of the spec annotation from an unstructured object after we remove all the fields +// we have modified. 
+func computeManifestHash(obj *unstructured.Unstructured) (string, error) { + manifest := obj.DeepCopy() + // strip all the appliedWork owner fields + curRefs := manifest.GetOwnerReferences() + var newRefs []metav1.OwnerReference + for _, ownerRef := range curRefs { + if ownerRef.APIVersion == workv1alpha1.GroupVersion.String() && ownerRef.Kind == workv1alpha1.AppliedWorkKind { + // we skip the appliedWork owner + continue + } + newRefs = append(newRefs, ownerRef) + } + manifest.SetOwnerReferences(newRefs) + // remove the manifestHash Annotation just in case + annotation := manifest.GetAnnotations() + if annotation != nil { + delete(annotation, manifestHashAnnotation) + manifest.SetAnnotations(annotation) + } + // strip the status just in case + data := manifest.Object delete(data, "status") - + // compute the sha256 hash of the remaining data jsonBytes, err := json.Marshal(data) if err != nil { return "", err } - return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)), nil } -// MergeMapOverrideWithDst merges two could be nil maps. Keep the dst for any conflicts, -func mergeMapOverrideWithDst(src, dst map[string]string) map[string]string { - if src == nil && dst == nil { - return nil - } - r := make(map[string]string) - for k, v := range src { - r[k] = v - } - // override the src for the same key - for k, v := range dst { - r[k] = v - } - return r -} - -// Determines if two arrays contain the same metav1.OwnerReference. -func hasSharedOwnerReference(owners []metav1.OwnerReference, target metav1.OwnerReference) bool { - // TODO: Move to a util directory or find an existing library. - for _, owner := range owners { - if owner.APIVersion == target.APIVersion && owner.Kind == target.Kind && owner.Name == target.Name && owner.UID == target.UID { +// isManifestManagedByWork determines if an object is managed by the work controller. +func isManifestManagedByWork(ownerRefs []metav1.OwnerReference) bool { + // an object is managed by the work if any of its owner reference is of type appliedWork + for _, ownerRef := range ownerRefs { + if ownerRef.APIVersion == workv1alpha1.GroupVersion.String() && ownerRef.Kind == workv1alpha1.AppliedWorkKind { return true } } return false } -// Inserts the owner reference into the array of existing owner references. -func insertOwnerReference(owners []metav1.OwnerReference, newOwner metav1.OwnerReference) []metav1.OwnerReference { - if hasSharedOwnerReference(owners, newOwner) { - return owners - } else { - return append(owners, newOwner) - } -} - -// Merges two owner reference arrays. -func mergeOwnerReference(owners, newOwners []metav1.OwnerReference) []metav1.OwnerReference { - for _, newOwner := range newOwners { - owners = insertOwnerReference(owners, newOwner) - } - return owners -} - // findManifestConditionByIdentifier return a ManifestCondition by identifier // 1. Find the manifest condition with the whole identifier. // 2. If identifier only has ordinal, and a matched cannot be found, return nil. @@ -389,20 +434,20 @@ func findManifestConditionByIdentifier(identifier workv1alpha1.ResourceIdentifie return nil } -// setSpecHashAnnotation computes the hash of the provided spec and sets an annotation of the -// hash on the provided unstructured objectt. This method is used internally by Apply methods. -func setSpecHashAnnotation(obj *unstructured.Unstructured) error { - specHash, err := generateSpecHash(obj) +// setManifestHashAnnotation computes the hash of the provided manifest and sets an annotation of the +// hash on the provided unstructured object. 
+func setManifestHashAnnotation(manifestObj *unstructured.Unstructured) error { + specHash, err := computeManifestHash(manifestObj) if err != nil { return err } - annotation := obj.GetAnnotations() + annotation := manifestObj.GetAnnotations() if annotation == nil { annotation = map[string]string{} } - annotation[specHashAnnotation] = specHash - obj.SetAnnotations(annotation) + annotation[manifestHashAnnotation] = specHash + manifestObj.SetAnnotations(annotation) return nil } @@ -411,7 +456,6 @@ func buildResourceIdentifier(index int, object *unstructured.Unstructured, gvr s identifier := workv1alpha1.ResourceIdentifier{ Ordinal: index, } - identifier.Group = object.GroupVersionKind().Group identifier.Version = object.GroupVersionKind().Version identifier.Kind = object.GroupVersionKind().Kind @@ -422,11 +466,12 @@ func buildResourceIdentifier(index int, object *unstructured.Unstructured, gvr s return identifier } -func buildAppliedStatusCondition(err error, updated bool, observedGeneration int64) metav1.Condition { +func buildManifestAppliedCondition(err error, updated bool, observedGeneration int64) metav1.Condition { if err != nil { return metav1.Condition{ Type: ConditionTypeApplied, Status: metav1.ConditionFalse, + ObservedGeneration: observedGeneration, LastTransitionTime: metav1.Now(), Reason: "appliedManifestFailed", Message: fmt.Sprintf("Failed to apply manifest: %v", err), @@ -453,9 +498,9 @@ func buildAppliedStatusCondition(err error, updated bool, observedGeneration int } } -// generateWorkAppliedStatusCondition generate appied status condition for work. +// generateWorkAppliedCondition generate appied status condition for work. // If one of the manifests is applied failed on the spoke, the applied status condition of the work is false. -func generateWorkAppliedStatusCondition(manifestConditions []workv1alpha1.ManifestCondition, observedGeneration int64) metav1.Condition { +func generateWorkAppliedCondition(manifestConditions []workv1alpha1.ManifestCondition, observedGeneration int64) metav1.Condition { for _, manifestCond := range manifestConditions { if meta.IsStatusConditionFalse(manifestCond.Conditions, ConditionTypeApplied) { return metav1.Condition{ @@ -478,25 +523,3 @@ func generateWorkAppliedStatusCondition(manifestConditions []workv1alpha1.Manife ObservedGeneration: observedGeneration, } } - -func generateWorkAvailableStatusCondition(status metav1.ConditionStatus, observedGeneration int64) metav1.Condition { - if status == metav1.ConditionTrue { - return metav1.Condition{ - Type: ConditionTypeAvailable, - Status: metav1.ConditionTrue, - LastTransitionTime: metav1.Now(), - Reason: "appliedWorkAvailable", - Message: "This workload is available", - ObservedGeneration: observedGeneration, - } - } - - return metav1.Condition{ - Type: ConditionTypeAvailable, - Status: metav1.ConditionFalse, - LastTransitionTime: metav1.Now(), - Reason: "appliedWorkFailed", - Message: "This workload is not fully available", - ObservedGeneration: observedGeneration, - } -} diff --git a/pkg/controllers/apply_controller_integratoin_test.go b/pkg/controllers/apply_controller_integratoin_test.go new file mode 100644 index 00000000..87d47803 --- /dev/null +++ b/pkg/controllers/apply_controller_integratoin_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"k8s.io/apimachinery/pkg/types"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilrand "k8s.io/apimachinery/pkg/util/rand"
+	workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
+)
+
+var _ = Describe("Work Controller", func() {
+	var workNamespace string
+	const timeout = time.Second * 30
+	const interval = time.Second * 1
+
+	BeforeEach(func() {
+		workNamespace = "work-" + utilrand.String(5)
+		// Create namespace
+		ns := &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: workNamespace,
+			},
+		}
+		_, err := k8sClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
+		Expect(err).ToNot(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		// Add any teardown steps that need to be executed after each test
+		err := k8sClient.CoreV1().Namespaces().Delete(context.Background(), workNamespace, metav1.DeleteOptions{})
+		Expect(err).ToNot(HaveOccurred())
+	})
+
+	Context("Deploy manifests by work", func() {
+		It("Should have a configmap deployed correctly", func() {
+			cmName := "testcm"
+			cmNamespace := "default"
+			cm := &corev1.ConfigMap{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "v1",
+					Kind: "ConfigMap",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: cmName,
+					Namespace: cmNamespace,
+				},
+				Data: map[string]string{
+					"test": "test",
+				},
+			}
+
+			work := &workv1alpha1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-work",
+					Namespace: workNamespace,
+				},
+				Spec: workv1alpha1.WorkSpec{
+					Workload: workv1alpha1.WorkloadTemplate{
+						Manifests: []workv1alpha1.Manifest{
+							{
+								RawExtension: runtime.RawExtension{Object: cm},
+							},
+						},
+					},
+				},
+			}
+
+			err := workClient.Create(context.Background(), work)
+			Expect(err).ToNot(HaveOccurred())
+
+			Eventually(func() error {
+				_, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.Background(), cmName, metav1.GetOptions{})
+				return err
+			}, timeout, interval).Should(Succeed())
+
+			Eventually(func() error {
+				resultWork := workv1alpha1.Work{}
+				err := workClient.Get(context.Background(), types.NamespacedName{Name: work.GetName(), Namespace: workNamespace}, &resultWork)
+				if err != nil {
+					return err
+				}
+				if len(resultWork.Status.ManifestConditions) != 1 {
+					return fmt.Errorf("expect exactly one manifest condition to be updated")
+				}
+
+				if !meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, "Applied") {
+					return fmt.Errorf("expect the condition status of the manifest to be true")
+				}
+
+				if !meta.IsStatusConditionTrue(resultWork.Status.Conditions, "Applied") {
+					return fmt.Errorf("expect the condition status of the work to be true")
+				}
+
+				return nil
+			}, timeout, interval).Should(Succeed())
+		})
+	})
+})
diff --git a/pkg/controllers/apply_controller_test.go b/pkg/controllers/apply_controller_test.go
index b9253847..19ef3ae3 100644
--- a/pkg/controllers/apply_controller_test.go
+++ b/pkg/controllers/apply_controller_test.go
@@ -1,122 +1,870 @@
-/*
-Copyright 2021 The
Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package controllers import ( "context" + "encoding/json" "fmt" - "k8s.io/apimachinery/pkg/types" + "math" + "reflect" + "testing" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" + "github.com/crossplane/crossplane-runtime/pkg/test" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/fake" + testingclient "k8s.io/client-go/testing" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" + "sigs.k8s.io/work-api/pkg/utils" ) -var _ = Describe("Work Controller", func() { - var workNamespace string - const timeout = time.Second * 30 - const interval = time.Second * 1 +var ( + fakeDynamicClient = fake.NewSimpleDynamicClient(runtime.NewScheme()) + appliedWork = &workv1alpha1.AppliedWork{} + ownerRef = metav1.OwnerReference{ + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: appliedWork.Kind, + } + testGvr = schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "Deployment", + } + testDeployment = appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "Deployment", + OwnerReferences: []metav1.OwnerReference{ + ownerRef, + }, + }, + Spec: appsv1.DeploymentSpec{ + MinReadySeconds: 5, + }, + } + rawTestDeployment, _ = json.Marshal(testDeployment) + testManifest = workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ + Raw: rawTestDeployment, + }} +) - BeforeEach(func() { - workNamespace = "work-" + utilrand.String(5) - // Create namespace - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: workNamespace, +// This interface is needed for testMapper abstract class. 
+type testMapper struct { + meta.RESTMapper +} + +func (m testMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if gk.Kind == "Deployment" { + return &meta.RESTMapping{ + Resource: testGvr, + GroupVersionKind: testDeployment.GroupVersionKind(), + Scope: nil, + }, nil + } else { + return nil, errors.New("test error: mapping does not exist.") + } +} + +func TestApplyManifest(t *testing.T) { + failMsg := "manifest apply failed" + // Manifests + rawInvalidResource, _ := json.Marshal([]byte(utilrand.String(10))) + rawMissingResource, _ := json.Marshal( + v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "core/v1", }, - } - _, err := k8sClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) + }) + InvalidManifest := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ + Raw: rawInvalidResource, + }} + MissingManifest := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ + Raw: rawMissingResource, + }} + + // GVRs + expectedGvr := schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "Deployment", + } + emptyGvr := schema.GroupVersionResource{} + + // DynamicClients + clientFailDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + clientFailDynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New(failMsg) }) - AfterEach(func() { - // Add any teardown steps that needs to be executed after each test - err := k8sClient.CoreV1().Namespaces().Delete(context.Background(), workNamespace, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + testCases := map[string]struct { + reconciler ApplyWorkReconciler + manifestList []workv1alpha1.Manifest + generation int64 + updated bool + wantGvr schema.GroupVersionResource + wantErr error + }{ + "manifest is in proper format/ happy path": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + manifestList: append([]workv1alpha1.Manifest{}, testManifest), + generation: 0, + updated: true, + wantGvr: expectedGvr, + wantErr: nil, + }, + "manifest has incorrect syntax/ decode fail": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + manifestList: append([]workv1alpha1.Manifest{}, InvalidManifest), + generation: 0, + updated: false, + wantGvr: emptyGvr, + wantErr: &json.UnmarshalTypeError{ + Value: "string", + Type: reflect.TypeOf(map[string]interface{}{}), + }, + }, + "manifest is correct / object not mapped in restmapper / decode fail": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + manifestList: append([]workv1alpha1.Manifest{}, MissingManifest), + generation: 0, + updated: false, + wantGvr: emptyGvr, + wantErr: errors.New("failed to find group/version/resource from restmapping: test error: mapping does not exist."), + }, + "manifest is in proper format/ should fail applyUnstructured": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + 
spokeDynamicClient: clientFailDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + manifestList: append([]workv1alpha1.Manifest{}, testManifest), + generation: 0, + updated: false, + wantGvr: expectedGvr, + wantErr: errors.New(failMsg), + }, + } + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + resultList := testCase.reconciler.applyManifests(context.Background(), testCase.manifestList, ownerRef) + for _, result := range resultList { + if testCase.wantErr != nil { + assert.Containsf(t, result.err.Error(), testCase.wantErr.Error(), "Incorrect error for Testcase %s", testName) + } + assert.Equalf(t, testCase.generation, result.generation, "Testcase %s: generation incorrect", testName) + assert.Equalf(t, testCase.updated, result.updated, "Testcase %s: Updated boolean incorrect", testName) + } + }) + } +} + +func TestApplyUnstructured(t *testing.T) { + correctObj, correctDynamicClient, correctSpecHash := createObjAndDynamicClient(testManifest.Raw) + + testDeploymentDiffSpec := testDeployment.DeepCopy() + testDeploymentDiffSpec.Spec.MinReadySeconds = 0 + rawDiffSpec, _ := json.Marshal(testDeploymentDiffSpec) + testManifestDiffSpec := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ + Raw: rawDiffSpec, + }} + diffSpecObj, diffSpecDynamicClient, diffSpecHash := createObjAndDynamicClient(testManifestDiffSpec.Raw) + + patchFailClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + patchFailClient.PrependReactor("patch", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("patch failed") }) - Context("Deploy manifests by work", func() { - It("Should have a configmap deployed correctly", func() { - cmName := "testcm" - cmNamespace := "default" - cm := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: cmName, - Namespace: cmNamespace, - }, - Data: map[string]string{ - "test": "test", + patchFailClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, diffSpecObj.DeepCopy(), nil + }) + + dynamicClientNotFound := fake.NewSimpleDynamicClient(runtime.NewScheme()) + dynamicClientNotFound.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return false, + nil, + &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonNotFound, + }} + }) + + dynamicClientError := fake.NewSimpleDynamicClient(runtime.NewScheme()) + dynamicClientError.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, + nil, + errors.New("client error") + }) + + testDeploymentWithDifferentOwner := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "Deployment", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: utilrand.String(10), + Kind: utilrand.String(10), + Name: utilrand.String(10), + UID: types.UID(utilrand.String(10)), }, + }, + }, + } + rawTestDeploymentWithDifferentOwner, _ := json.Marshal(testDeploymentWithDifferentOwner) + _, diffOwnerDynamicClient, _ := createObjAndDynamicClient(rawTestDeploymentWithDifferentOwner) + + specHashFailObj := correctObj.DeepCopy() + 
specHashFailObj.Object["test"] = math.Inf(1) + + testCases := map[string]struct { + reconciler ApplyWorkReconciler + workObj *unstructured.Unstructured + resultSpecHash string + resultBool bool + resultErr error + }{ + "error during SpecHash Generation / fail": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: specHashFailObj, + resultBool: false, + resultErr: errors.New("unsupported value"), + }, + "not found error looking for object / success due to creation": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: dynamicClientNotFound, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: correctObj.DeepCopy(), + resultSpecHash: correctSpecHash, + resultBool: true, + resultErr: nil, + }, + "client error looking for object / fail": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: dynamicClientError, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: correctObj.DeepCopy(), + resultBool: false, + resultErr: errors.New("client error"), + }, + "owner reference comparison failure / fail": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: diffOwnerDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: correctObj.DeepCopy(), + resultBool: false, + resultErr: errors.New("resource is not managed by the work controller"), + }, + "equal spec hash of current vs work object / succeed without updates": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: correctDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: correctObj.DeepCopy(), + resultSpecHash: correctSpecHash, + resultBool: false, + resultErr: nil, + }, + "unequal spec hash of current vs work object / client patch fail": { + reconciler: ApplyWorkReconciler{ + spokeDynamicClient: patchFailClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: correctObj.DeepCopy(), + resultBool: false, + resultErr: errors.New("patch failed"), + }, + "happy path - with updates": { + reconciler: ApplyWorkReconciler{ + spokeDynamicClient: diffSpecDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + workObj: &correctObj, + resultSpecHash: diffSpecHash, + resultBool: true, + resultErr: nil, + }, + } + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + applyResult, applyResultBool, err := testCase.reconciler.applyUnstructured(context.Background(), testGvr, testCase.workObj) + assert.Equalf(t, testCase.resultBool, applyResultBool, "updated boolean not matching for Testcase %s", testName) + if testCase.resultErr != nil { + assert.Containsf(t, err.Error(), testCase.resultErr.Error(), "error not matching for Testcase %s", testName) + } else { + assert.Truef(t, err == nil, "err is not nil for Testcase %s", testName) + assert.Truef(t, applyResult != nil, "applyResult is not nil for Testcase 
%s", testName) + assert.Equalf(t, testCase.resultSpecHash, applyResult.GetAnnotations()[manifestHashAnnotation], + "specHash not matching for Testcase %s", testName) + assert.Equalf(t, ownerRef, applyResult.GetOwnerReferences()[0], "ownerRef not matching for Testcase %s", testName) } + }) + } +} - work := &workv1alpha1.Work{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-work", - Namespace: workNamespace, - }, - Spec: workv1alpha1.WorkSpec{ - Workload: workv1alpha1.WorkloadTemplate{ - Manifests: []workv1alpha1.Manifest{ - { - RawExtension: runtime.RawExtension{Object: cm}, - }, - }, - }, +func TestReconcile(t *testing.T) { + failMsg := "manifest apply failed" + workNamespace := utilrand.String(10) + workName := utilrand.String(10) + appliedWorkName := utilrand.String(10) + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: workNamespace, + Name: workName, + }, + } + wrongReq := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: utilrand.String(10), + Name: utilrand.String(10), + }, + } + invalidReq := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "", + Name: "", + }, + } + + getMock := func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + if key.Namespace != workNamespace { + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonNotFound, + }} + } + o, _ := obj.(*workv1alpha1.Work) + *o = workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workNamespace, + Name: workName, + Finalizers: []string{"multicluster.x-k8s.io/work-cleanup"}, + }, + Spec: workv1alpha1.WorkSpec{Workload: workv1alpha1.WorkloadTemplate{Manifests: []workv1alpha1.Manifest{testManifest}}}, + } + return nil + } + + happyDeployment := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "Deployment", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: "AppliedWork", + Name: appliedWorkName, }, - } + }, + }, + Spec: appsv1.DeploymentSpec{ + MinReadySeconds: 5, + }, + } + rawHappyDeployment, _ := json.Marshal(happyDeployment) + happyManifest := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ + Raw: rawHappyDeployment, + }} + _, happyDynamicClient, _ := createObjAndDynamicClient(happyManifest.Raw) - err := workClient.Create(context.Background(), work) - Expect(err).ToNot(HaveOccurred()) + getMockAppliedWork := func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - Eventually(func() error { - _, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.Background(), cmName, metav1.GetOptions{}) - return err - }, timeout, interval).Should(Succeed()) + if key.Name != workName { + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonNotFound, + }} + } + o, _ := obj.(*workv1alpha1.AppliedWork) + *o = workv1alpha1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: appliedWorkName, + }, + Spec: workv1alpha1.AppliedWorkSpec{ + WorkName: workNamespace, + WorkNamespace: workName, + }, + } + return nil + } - Eventually(func() error { - resultWork := workv1alpha1.Work{} - err := workClient.Get(context.Background(), types.NamespacedName{Name: work.GetName(), Namespace: workNamespace}, &resultWork) - if err != nil { - return err - } - if len(resultWork.Status.ManifestConditions) != 1 { - return fmt.Errorf("Expect the 1 manifest condition 
is updated") - } + clientFailDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + clientFailDynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New(failMsg) + }) - if !meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, "Applied") { - return fmt.Errorf("Exepect condition status of the manifest to be true") + testCases := map[string]struct { + reconciler ApplyWorkReconciler + req ctrl.Request + wantErr error + requeue bool + }{ + "controller is being stopped": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{}, + spokeDynamicClient: happyDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: false, + }, + req: req, + wantErr: nil, + requeue: true, + }, + "work cannot be retrieved, client failed due to client error": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + return fmt.Errorf("client failing") + }, + }, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + req: invalidReq, + wantErr: errors.New("client failing"), + }, + "work cannot be retrieved, client failed due to not found error": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: getMock, + }, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + }, + req: wrongReq, + wantErr: nil, + }, + "work without finalizer / no error": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + o, _ := obj.(*workv1alpha1.Work) + *o = workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workNamespace, + Name: workName, + }, + } + return nil + }, + }, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + }, + req: req, + wantErr: nil, + }, + "work with non-zero deletion-timestamp / succeed": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + o, _ := obj.(*workv1alpha1.Work) + *o = workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: workNamespace, + Name: workName, + Finalizers: []string{"multicluster.x-k8s.io/work-cleanup"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + return nil + }, + }, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{}, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + }, + req: req, + wantErr: nil, + }, + "Retrieving appliedwork fails": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: getMock, + }, + spokeDynamicClient: fakeDynamicClient, + spokeClient: &test.MockClient{ + MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Reason: metav1.StatusReasonNotFound, + }} + }, + }, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + req: req, + wantErr: errors.New("manifest apply unwarranted; the spec has not changed"), + }, + 
"ApplyManifest fails": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: getMock, + MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return nil + }, + }, + spokeDynamicClient: clientFailDynamicClient, + spokeClient: &test.MockClient{ + MockGet: getMockAppliedWork, + MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return nil + }, + }, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(2), + joined: true, + }, + req: req, + wantErr: errors.New(failMsg), + }, + "client update fails": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: getMock, + MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return errors.New("failed") + }, + }, + spokeDynamicClient: clientFailDynamicClient, + spokeClient: &test.MockClient{ + MockGet: getMockAppliedWork, + }, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(2), + joined: true, + }, + req: req, + wantErr: errors.New("failed"), + }, + "Happy Path": { + reconciler: ApplyWorkReconciler{ + client: &test.MockClient{ + MockGet: getMock, + MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return nil + }, + }, + spokeDynamicClient: happyDynamicClient, + spokeClient: &test.MockClient{ + MockGet: getMockAppliedWork, + }, + restMapper: testMapper{}, + recorder: utils.NewFakeRecorder(1), + joined: true, + }, + req: req, + wantErr: nil, + requeue: true, + }, + } + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + ctrlResult, err := testCase.reconciler.Reconcile(context.Background(), testCase.req) + if testCase.wantErr != nil { + assert.Containsf(t, err.Error(), testCase.wantErr.Error(), "incorrect error for Testcase %s", testName) + } else { + if testCase.requeue { + assert.Equal(t, ctrl.Result{RequeueAfter: time.Minute * 5}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) } + assert.Equalf(t, false, ctrlResult.Requeue, "incorrect ctrlResult for Testcase %s", testName) + } + }) + } +} - if !meta.IsStatusConditionTrue(resultWork.Status.Conditions, "Applied") { - return fmt.Errorf("Exepect condition status of the work to be true") - } +func TestSetManifestHashAnnotation(t *testing.T) { + // basic setup + appliedOwnerRef := metav1.OwnerReference{ + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: workv1alpha1.AppliedWorkKind, + Name: utilrand.String(10), + UID: types.UID(utilrand.String(10)), + } + manifestObj := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Deployment", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: utilrand.String(10), + Kind: utilrand.String(10), + Name: utilrand.String(10), + UID: types.UID(utilrand.String(10)), + }, + }, + Annotations: map[string]string{utilrand.String(10): utilrand.String(10)}, + }, + Spec: appsv1.DeploymentSpec{ + Paused: true, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, + }, + } + // the normal case that the curObj has an annotation and owner reference with status + curObj := manifestObj.DeepCopy() + curObj.Annotations[manifestHashAnnotation] = utilrand.String(10) + curObj.OwnerReferences = append(curObj.OwnerReferences, appliedOwnerRef) + curObj.Status.Replicas = 10 + curObj.Status.ReadyReplicas = 1 - return nil - }, timeout, interval).Should(Succeed()) + tests := map[string]struct { + manifestObj interface{} + curObj 
interface{} + isSame bool + }{ + "the normal case after an object is applied": { + manifestObj: &manifestObj, + curObj: curObj, + isSame: true, + }, + "current is owned by multiple appliedWorks": { + manifestObj: &manifestObj, + curObj: func() *appsv1.Deployment { + extraObj := curObj.DeepCopy() + extraObj.OwnerReferences = append(extraObj.OwnerReferences, appliedOwnerRef) + return extraObj + }(), + isSame: true, + }, + "current status changed": { + manifestObj: &manifestObj, + curObj: func() *appsv1.Deployment { + extraObj := curObj.DeepCopy() + extraObj.Status.ReadyReplicas = 10 + return extraObj + }(), + isSame: true, + }, + "current's appliedWork owner is removed by the user": { + manifestObj: &manifestObj, + curObj: func() *appsv1.Deployment { + noObj := curObj.DeepCopy() + noObj.SetOwnerReferences(manifestObj.GetOwnerReferences()) + return noObj + }(), + isSame: true, + }, + "current's hash annotation is changed by the user": { + manifestObj: &manifestObj, + curObj: func() *appsv1.Deployment { + alterObj := curObj.DeepCopy() + alterObj.Annotations[manifestHashAnnotation] = utilrand.String(10) + return alterObj + }(), + isSame: true, + }, + "manifest is has changed ownership": { + manifestObj: func() *appsv1.Deployment { + alterObj := manifestObj.DeepCopy() + alterObj.OwnerReferences[0].APIVersion = utilrand.String(10) + return alterObj + }(), + curObj: curObj, + isSame: false, + }, + "manifest has a different label": { + manifestObj: func() *appsv1.Deployment { + alterObj := manifestObj.DeepCopy() + alterObj.SetLabels(map[string]string{utilrand.String(5): utilrand.String(10)}) + return alterObj + }(), + curObj: curObj, + isSame: false, + }, + "manifest has a different annotation": { + manifestObj: func() *appsv1.Deployment { + alterObj := manifestObj.DeepCopy() + alterObj.SetAnnotations(map[string]string{utilrand.String(5): utilrand.String(10)}) + return alterObj + }(), + curObj: curObj, + isSame: false, + }, + "manifest has a different spec": { + manifestObj: func() *appsv1.Deployment { + alterObj := manifestObj.DeepCopy() + alterObj.Spec.Replicas = pointer.Int32Ptr(100) + return alterObj + }(), + curObj: curObj, + isSame: false, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + var uCurObj, uManifestObj unstructured.Unstructured + uManifestObj.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(tt.manifestObj) + uCurObj.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(tt.curObj) + setManifestHashAnnotation(&uManifestObj) + setManifestHashAnnotation(&uCurObj) + manifestHash := uManifestObj.GetAnnotations()[manifestHashAnnotation] + curHash := uCurObj.GetAnnotations()[manifestHashAnnotation] + assert.Equalf(t, tt.isSame, manifestHash == curHash, "manifestObj = (%v), curObj = (%+v) ", tt.manifestObj, tt.curObj) + }) + } +} + +func TestIsManifestManagedByWork(t *testing.T) { + tests := map[string]struct { + ownerRefs []metav1.OwnerReference + isManaged bool + }{ + "empty owner list": { + ownerRefs: nil, + isManaged: false, + }, + "no appliedWork": { + ownerRefs: []metav1.OwnerReference{ + { + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: workv1alpha1.WorkKind, + }, + }, + isManaged: false, + }, + "one appliedWork": { + ownerRefs: []metav1.OwnerReference{ + { + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: workv1alpha1.AppliedWorkKind, + Name: utilrand.String(10), + UID: types.UID(utilrand.String(10)), + }, + }, + isManaged: true, + }, + "multiple appliedWork": { + ownerRefs: []metav1.OwnerReference{ + { + 
APIVersion: workv1alpha1.GroupVersion.String(), + Kind: workv1alpha1.AppliedWorkKind, + Name: utilrand.String(10), + UID: types.UID(utilrand.String(10)), + }, + { + APIVersion: workv1alpha1.GroupVersion.String(), + Kind: workv1alpha1.AppliedWorkKind, + UID: types.UID(utilrand.String(10)), + }, + }, + isManaged: true, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + assert.Equalf(t, tt.isManaged, isManifestManagedByWork(tt.ownerRefs), "isManifestManagedByWork(%v)", tt.ownerRefs) }) + } +} + +func createObjAndDynamicClient(rawManifest []byte) (unstructured.Unstructured, dynamic.Interface, string) { + unstructuredObj := &unstructured.Unstructured{} + _ = unstructuredObj.UnmarshalJSON(rawManifest) + validSpecHash, _ := computeManifestHash(unstructuredObj) + unstructuredObj.SetAnnotations(map[string]string{manifestHashAnnotation: validSpecHash}) + dynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) + dynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, unstructuredObj, nil + }) + dynamicClient.PrependReactor("patch", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { + return true, unstructuredObj, nil }) -}) + return *unstructuredObj, dynamicClient, validSpecHash +} diff --git a/pkg/controllers/apply_controller_unit_test.go b/pkg/controllers/apply_controller_unit_test.go deleted file mode 100644 index aca72e57..00000000 --- a/pkg/controllers/apply_controller_unit_test.go +++ /dev/null @@ -1,684 +0,0 @@ -package controllers - -import ( - "context" - "encoding/json" - "fmt" - "math" - "reflect" - "testing" - "time" - - "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/fake" - testingclient "k8s.io/client-go/testing" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "sigs.k8s.io/work-api/pkg/utils" -) - -var ( - fakeDynamicClient = fake.NewSimpleDynamicClient(runtime.NewScheme()) - appliedWork = &workv1alpha1.AppliedWork{} - ownerRef = metav1.OwnerReference{ - APIVersion: workv1alpha1.GroupVersion.String(), - Kind: appliedWork.Kind, - } - testGvr = schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "Deployment", - } - testDeployment = appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "Deployment", - OwnerReferences: []metav1.OwnerReference{ - ownerRef, - }, - }, - Spec: appsv1.DeploymentSpec{ - MinReadySeconds: 5, - }, - } - rawTestDeployment, _ = json.Marshal(testDeployment) - testManifest = workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ - Raw: rawTestDeployment, - }} -) - -// This interface is needed for testMapper abstract class. 
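TestSetManifestHashAnnotation and TestIsManifestManagedByWork above pin down the two checks the apply path relies on: an object is only touched when it carries an AppliedWork owner reference, and a patch is only issued when the recorded hash annotation no longer matches the desired manifest. A minimal, self-contained sketch of the hash side of that decision follows; the helper names here are hypothetical, only the annotation key is taken from this patch.

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// hashAnnotation mirrors the manifestHashAnnotation key introduced in this patch.
const hashAnnotation = "multicluster.x-k8s.io/spec-hash"

// hashManifest is a hypothetical stand-in for computeManifestHash: it hashes the object with
// the status field and any previously written hash annotation stripped out, so status churn
// or an older hash does not change the result.
func hashManifest(obj *unstructured.Unstructured) (string, error) {
	m := obj.DeepCopy()
	annotations := m.GetAnnotations()
	delete(annotations, hashAnnotation)
	m.SetAnnotations(annotations)
	delete(m.Object, "status")
	raw, err := json.Marshal(m.Object)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(raw)), nil
}

// needsPatch reports whether the desired manifest differs from what was last applied,
// judged purely by the hash annotation recorded on the current object.
func needsPatch(desired, current *unstructured.Unstructured) (bool, error) {
	want, err := hashManifest(desired)
	if err != nil {
		return false, err
	}
	return current.GetAnnotations()[hashAnnotation] != want, nil
}

func main() {
	desired := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo"},
		"data":       map[string]interface{}{"k": "v"},
	}}
	current := desired.DeepCopy()
	h, _ := hashManifest(current)
	current.SetAnnotations(map[string]string{hashAnnotation: h})

	changed, _ := needsPatch(desired, current)
	fmt.Println("needs patch:", changed) // false: hashes match, so the apply can be skipped
}

The controller itself additionally refuses to patch objects that have no AppliedWork owner reference at all, which is the case the deleted unit test below (and its replacement above) covers with "resource is not managed by the work controller".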
-type testMapper struct { - meta.RESTMapper -} - -func (m testMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if gk.Kind == "Deployment" { - return &meta.RESTMapping{ - Resource: testGvr, - GroupVersionKind: testDeployment.GroupVersionKind(), - Scope: nil, - }, nil - } else { - return nil, errors.New("test error: mapping does not exist.") - } -} - -func TestApplyManifest(t *testing.T) { - failMsg := "manifest apply failed" - // Manifests - rawInvalidResource, _ := json.Marshal([]byte(rand.String(10))) - rawMissingResource, _ := json.Marshal( - v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "core/v1", - }, - }) - InvalidManifest := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ - Raw: rawInvalidResource, - }} - MissingManifest := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ - Raw: rawMissingResource, - }} - - // GVRs - expectedGvr := schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "Deployment", - } - emptyGvr := schema.GroupVersionResource{} - - // DynamicClients - clientFailDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) - clientFailDynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New(failMsg) - }) - - testCases := map[string]struct { - reconciler ApplyWorkReconciler - manifestList []workv1alpha1.Manifest - generation int64 - updated bool - wantGvr schema.GroupVersionResource - wantErr error - }{ - "manifest is in proper format/ happy path": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - manifestList: append([]workv1alpha1.Manifest{}, testManifest), - generation: 0, - updated: true, - wantGvr: expectedGvr, - wantErr: nil, - }, - "manifest has incorrect syntax/ decode fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - manifestList: append([]workv1alpha1.Manifest{}, InvalidManifest), - generation: 0, - updated: false, - wantGvr: emptyGvr, - wantErr: &json.UnmarshalTypeError{ - Value: "string", - Type: reflect.TypeOf(map[string]interface{}{}), - }, - }, - "manifest is correct / object not mapped in restmapper / decode fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - manifestList: append([]workv1alpha1.Manifest{}, MissingManifest), - generation: 0, - updated: false, - wantGvr: emptyGvr, - wantErr: errors.New("failed to find group/version/resource from restmapping: test error: mapping does not exist."), - }, - "manifest is in proper format/ should fail applyUnstructured": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: clientFailDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - manifestList: append([]workv1alpha1.Manifest{}, testManifest), - generation: 0, - updated: false, - wantGvr: expectedGvr, - wantErr: errors.New(failMsg), - }, - } - - for testName, testCase := range testCases { - 
t.Run(testName, func(t *testing.T) { - resultList := testCase.reconciler.applyManifests(context.Background(), testCase.manifestList, ownerRef) - for _, result := range resultList { - if testCase.wantErr != nil { - assert.Containsf(t, result.err.Error(), testCase.wantErr.Error(), "Incorrect error for Testcase %s", testName) - } - assert.Equalf(t, testCase.generation, result.generation, "Testcase %s: generation incorrect", testName) - assert.Equalf(t, testCase.updated, result.updated, "Testcase %s: Updated boolean incorrect", testName) - } - }) - } -} - -func TestApplyUnstructured(t *testing.T) { - correctObj, correctDynamicClient, correctSpecHash := createObjAndDynamicClient(testManifest.Raw) - - testDeploymentDiffSpec := testDeployment.DeepCopy() - testDeploymentDiffSpec.Spec.MinReadySeconds = 0 - rawDiffSpec, _ := json.Marshal(testDeploymentDiffSpec) - testManifestDiffSpec := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ - Raw: rawDiffSpec, - }} - diffSpecObj, diffSpecDynamicClient, diffSpecHash := createObjAndDynamicClient(testManifestDiffSpec.Raw) - - patchFailClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) - patchFailClient.PrependReactor("patch", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New("patch failed") - }) - patchFailClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, diffSpecObj.DeepCopy(), nil - }) - - dynamicClientNotFound := fake.NewSimpleDynamicClient(runtime.NewScheme()) - dynamicClientNotFound.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return false, - nil, - &apierrors.StatusError{ - ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Reason: metav1.StatusReasonNotFound, - }} - }) - - dynamicClientError := fake.NewSimpleDynamicClient(runtime.NewScheme()) - dynamicClientError.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, - nil, - errors.New("client error") - }) - - testDeploymentWithDifferentOwner := appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "Deployment", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: rand.String(10), - Kind: rand.String(10), - Name: rand.String(10), - UID: types.UID(rand.String(10)), - }, - }, - }, - } - rawTestDeploymentWithDifferentOwner, _ := json.Marshal(testDeploymentWithDifferentOwner) - _, diffOwnerDynamicClient, _ := createObjAndDynamicClient(rawTestDeploymentWithDifferentOwner) - - specHashFailObj := correctObj.DeepCopy() - specHashFailObj.Object["test"] = math.Inf(1) - - testCases := map[string]struct { - reconciler ApplyWorkReconciler - workObj *unstructured.Unstructured - resultSpecHash string - resultBool bool - resultErr error - }{ - "error during SpecHash Generation / fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: specHashFailObj, - resultBool: false, - resultErr: errors.New("unsupported value"), - }, - "not found error looking for object / success due to creation": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: dynamicClientNotFound, - spokeClient: &test.MockClient{}, 
- restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: correctObj.DeepCopy(), - resultSpecHash: correctSpecHash, - resultBool: true, - resultErr: nil, - }, - "client error looking for object / fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: dynamicClientError, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: correctObj.DeepCopy(), - resultBool: false, - resultErr: errors.New("client error"), - }, - "owner reference comparison failure / fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: diffOwnerDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: correctObj.DeepCopy(), - resultBool: false, - resultErr: errors.New("resource is not managed by the work controller"), - }, - "equal spec hash of current vs work object / succeed without updates": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: correctDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: correctObj.DeepCopy(), - resultSpecHash: correctSpecHash, - resultBool: false, - resultErr: nil, - }, - "unequal spec hash of current vs work object / client patch fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: patchFailClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: correctObj.DeepCopy(), - resultBool: false, - resultErr: errors.New("patch failed"), - }, - "happy path - with updates": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: diffSpecDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - workObj: &correctObj, - resultSpecHash: diffSpecHash, - resultBool: true, - resultErr: nil, - }, - } - - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - applyResult, applyResultBool, err := testCase.reconciler.applyUnstructured(context.Background(), testGvr, testCase.workObj) - assert.Equalf(t, testCase.resultBool, applyResultBool, "updated boolean not matching for Testcase %s", testName) - if testCase.resultErr != nil { - assert.Containsf(t, err.Error(), testCase.resultErr.Error(), "error not matching for Testcase %s", testName) - } else { - assert.Equalf(t, testCase.resultSpecHash, applyResult.GetAnnotations()["multicluster.x-k8s.io/spec-hash"], - "specHash not matching for Testcase %s", testName) - assert.Equalf(t, ownerRef, applyResult.GetOwnerReferences()[0], "ownerRef not matching for Testcase %s", testName) - } - }) - } -} - -func TestReconcile(t *testing.T) { - failMsg := "manifest apply failed" - workNamespace := rand.String(10) - workName := rand.String(10) - appliedWorkName := rand.String(10) - req := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: workNamespace, - Name: workName, - }, - } - wrongReq := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: rand.String(10), - Name: rand.String(10), - }, - } - invalidReq := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: "", - Name: "", - }, - } - - getMock := func(ctx context.Context, key client.ObjectKey, 
obj client.Object) error { - if key.Namespace != workNamespace { - return &apierrors.StatusError{ - ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Reason: metav1.StatusReasonNotFound, - }} - } - o, _ := obj.(*workv1alpha1.Work) - *o = workv1alpha1.Work{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: workNamespace, - Name: workName, - Finalizers: []string{"multicluster.x-k8s.io/work-cleanup"}, - }, - Spec: workv1alpha1.WorkSpec{Workload: workv1alpha1.WorkloadTemplate{Manifests: []workv1alpha1.Manifest{testManifest}}}, - } - return nil - } - - happyDeployment := appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "Deployment", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: workv1alpha1.GroupVersion.String(), - Kind: "AppliedWork", - Name: appliedWorkName, - }, - }, - }, - Spec: appsv1.DeploymentSpec{ - MinReadySeconds: 5, - }, - } - rawHappyDeployment, _ := json.Marshal(happyDeployment) - happyManifest := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ - Raw: rawHappyDeployment, - }} - _, happyDynamicClient, _ := createObjAndDynamicClient(happyManifest.Raw) - - getMockAppliedWork := func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - - if key.Name != workName { - return &apierrors.StatusError{ - ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Reason: metav1.StatusReasonNotFound, - }} - } - o, _ := obj.(*workv1alpha1.AppliedWork) - *o = workv1alpha1.AppliedWork{ - ObjectMeta: metav1.ObjectMeta{ - Name: appliedWorkName, - }, - Spec: workv1alpha1.AppliedWorkSpec{ - WorkName: workNamespace, - WorkNamespace: workName, - }, - } - return nil - } - - clientFailDynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) - clientFailDynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New(failMsg) - }) - - testCases := map[string]struct { - reconciler ApplyWorkReconciler - req ctrl.Request - wantErr error - requeue bool - }{ - "controller is being stopped": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: happyDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: false, - }, - req: req, - wantErr: nil, - requeue: true, - }, - "work cannot be retrieved, client failed due to client error": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - return fmt.Errorf("client failing") - }, - }, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - req: invalidReq, - wantErr: errors.New("client failing"), - }, - "work cannot be retrieved, client failed due to not found error": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: getMock, - }, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - }, - req: wrongReq, - wantErr: nil, - }, - "work without finalizer / no error": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - o, _ := obj.(*workv1alpha1.Work) - *o = workv1alpha1.Work{ - ObjectMeta: metav1.ObjectMeta{ - 
Namespace: workNamespace, - Name: workName, - }, - } - return nil - }, - }, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - }, - req: req, - wantErr: nil, - }, - "work with non-zero deletion-timestamp / succeed": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - o, _ := obj.(*workv1alpha1.Work) - *o = workv1alpha1.Work{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: workNamespace, - Name: workName, - Finalizers: []string{"multicluster.x-k8s.io/work-cleanup"}, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - }, - } - return nil - }, - }, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - }, - req: req, - wantErr: nil, - }, - "Retrieving appliedwork fails": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: getMock, - }, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { - return &apierrors.StatusError{ - ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Reason: metav1.StatusReasonNotFound, - }} - }, - }, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - req: req, - wantErr: errors.New("manifest apply unwarranted; the spec has not changed"), - }, - "ApplyManifest fails": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: getMock, - MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { - return nil - }, - }, - spokeDynamicClient: clientFailDynamicClient, - spokeClient: &test.MockClient{ - MockGet: getMockAppliedWork, - MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { - return nil - }, - }, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(2), - Joined: true, - }, - req: req, - wantErr: errors.New(failMsg), - }, - "client update fails": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: getMock, - MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { - return errors.New("failed") - }, - }, - spokeDynamicClient: clientFailDynamicClient, - spokeClient: &test.MockClient{ - MockGet: getMockAppliedWork, - }, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(2), - Joined: true, - }, - req: req, - wantErr: errors.New("failed"), - }, - "Happy Path": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{ - MockGet: getMock, - MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { - return nil - }, - }, - spokeDynamicClient: happyDynamicClient, - spokeClient: &test.MockClient{ - MockGet: getMockAppliedWork, - }, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - Joined: true, - }, - req: req, - wantErr: nil, - requeue: true, - }, - } - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - ctrlResult, err := testCase.reconciler.Reconcile(context.Background(), testCase.req) - if testCase.wantErr != nil { - assert.Containsf(t, err.Error(), testCase.wantErr.Error(), "incorrect error for Testcase %s", testName) - } else { - if testCase.requeue { - assert.Equal(t, ctrl.Result{RequeueAfter: time.Minute * 5}, ctrlResult, 
"incorrect ctrlResult for Testcase %s", testName) - } - assert.Equalf(t, false, ctrlResult.Requeue, "incorrect ctrlResult for Testcase %s", testName) - } - }) - } -} - -func createObjAndDynamicClient(rawManifest []byte) (unstructured.Unstructured, dynamic.Interface, string) { - unstructuredObj := &unstructured.Unstructured{} - _ = unstructuredObj.UnmarshalJSON(rawManifest) - validSpecHash, _ := generateSpecHash(unstructuredObj) - unstructuredObj.SetAnnotations(map[string]string{"multicluster.x-k8s.io/spec-hash": validSpecHash}) - dynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) - dynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, unstructuredObj, nil - }) - dynamicClient.PrependReactor("patch", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, unstructuredObj, nil - }) - return *unstructuredObj, dynamicClient, validSpecHash -} diff --git a/pkg/controllers/finalize_controller_unit_test.go b/pkg/controllers/finalize_controller_test.go similarity index 100% rename from pkg/controllers/finalize_controller_unit_test.go rename to pkg/controllers/finalize_controller_test.go diff --git a/pkg/controllers/manager.go b/pkg/controllers/manager.go index a66abdfe..c0da0f6a 100644 --- a/pkg/controllers/manager.go +++ b/pkg/controllers/manager.go @@ -30,8 +30,8 @@ import ( ) const ( - workFinalizer = "multicluster.x-k8s.io/work-cleanup" - specHashAnnotation = "multicluster.x-k8s.io/spec-hash" + workFinalizer = "multicluster.x-k8s.io/work-cleanup" + manifestHashAnnotation = "multicluster.x-k8s.io/spec-hash" ConditionTypeApplied = "Applied" ConditionTypeAvailable = "Available" @@ -95,16 +95,6 @@ func Start(ctx context.Context, hubCfg, spokeCfg *rest.Config, setupLog logr.Log return err } - if err = NewFinalizeWorkReconciler( - hubMgr.GetClient(), - spokeClient, - hubMgr.GetEventRecorderFor("WorkFinalizer_controller"), - true, - ).SetupWithManager(hubMgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "WorkFinalize") - return err - } - klog.Info("starting hub manager") defer klog.Info("shutting down hub manager") if err := hubMgr.Start(ctx); err != nil { diff --git a/pkg/controllers/resource_tracker.go b/pkg/controllers/resource_tracker.go index cf704d4e..055b2ea3 100644 --- a/pkg/controllers/resource_tracker.go +++ b/pkg/controllers/resource_tracker.go @@ -30,6 +30,7 @@ import ( workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" ) +// TODO: merge this back to the work status controller type appliedResourceTracker struct { hubClient client.Client spokeClient client.Client @@ -49,30 +50,30 @@ func (r *appliedResourceTracker) fetchWorks(ctx context.Context, nsWorkName type work := &workapi.Work{} appliedWork := &workapi.AppliedWork{} - // fetch work CR from the member cluster + // fetch work CR from the hub cluster err := r.hubClient.Get(ctx, nsWorkName, work) switch { case errors.IsNotFound(err): - klog.InfoS("work does not exist", "item", nsWorkName) + klog.V(4).InfoS("work resource does not exist", "item", nsWorkName) work = nil case err != nil: - klog.ErrorS(err, "failed to get work", "item", nsWorkName) + klog.ErrorS(err, "failed to get the work obj", "item", nsWorkName) return nil, nil, err default: - klog.V(8).InfoS("work exists in the hub cluster", "item", nsWorkName) + klog.V(5).InfoS("work exists in the hub cluster", "item", nsWorkName) } // fetch appliedWork CR from the member cluster err = r.spokeClient.Get(ctx, 
nsWorkName, appliedWork) switch { case errors.IsNotFound(err): - klog.InfoS("appliedWork does not exist", "item", nsWorkName) + klog.V(4).InfoS("appliedWork obj does not exist", "item", nsWorkName) appliedWork = nil case err != nil: klog.ErrorS(err, "failed to get appliedWork", "item", nsWorkName) return nil, nil, err default: - klog.V(8).InfoS("appliedWork exists in the member cluster", "item", nsWorkName) + klog.V(5).InfoS("appliedWork exists in the member cluster", "item", nsWorkName) } if err := checkConsistentExist(work, appliedWork, nsWorkName); err != nil { diff --git a/pkg/controllers/suite_test.go b/pkg/controllers/suite_test.go index 2df8a524..41e90177 100644 --- a/pkg/controllers/suite_test.go +++ b/pkg/controllers/suite_test.go @@ -32,8 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" ) @@ -60,8 +58,6 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func(done Done) { - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) - By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("../../", "config", "crd")}, diff --git a/pkg/controllers/work_status_controller.go b/pkg/controllers/work_status_controller.go index e3d22352..40e3e8e8 100644 --- a/pkg/controllers/work_status_controller.go +++ b/pkg/controllers/work_status_controller.go @@ -19,7 +19,6 @@ package controllers import ( "context" "fmt" - v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,9 +33,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - "time" - workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" + "time" ) // WorkStatusReconciler reconciles a Work object when its status changes @@ -44,7 +42,7 @@ type WorkStatusReconciler struct { appliedResourceTracker recorder record.EventRecorder concurrency int - Joined bool + joined bool } func NewWorkStatusReconciler(hubClient client.Client, spokeDynamicClient dynamic.Interface, spokeClient client.Client, restMapper meta.RESTMapper, recorder record.EventRecorder, concurrency int, joined bool) *WorkStatusReconciler { @@ -57,18 +55,17 @@ func NewWorkStatusReconciler(hubClient client.Client, spokeDynamicClient dynamic }, recorder: recorder, concurrency: concurrency, - Joined: joined, + joined: joined, } } // Reconcile implement the control loop logic for Work Status. 
func (r *WorkStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.InfoS("Work Status controller reconcile loop triggered", "item", req.NamespacedName) - - if !r.Joined { - klog.InfoS("work status controller is not started yet") + if !r.joined { + klog.V(3).InfoS("workStatus controller is not started yet, will requeue the request") return ctrl.Result{RequeueAfter: time.Second * 5}, nil } + klog.InfoS("workStatus controller reconcile loop is triggered", "item", req.NamespacedName) work, appliedWork, err := r.fetchWorks(ctx, req.NamespacedName) if err != nil { @@ -81,19 +78,24 @@ func (r *WorkStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) kLogObjRef := klog.KObj(work) // from now on both work objects should exist - newRes, staleRes := r.calculateNewAppliedWork(work, appliedWork) + newRes, staleRes, genErr := r.generateDiff(ctx, work, appliedWork) + if genErr != nil { + klog.ErrorS(err, "failed to generate the diff between work status and appliedWork status", work.Kind, kLogObjRef) + return ctrl.Result{}, err + } + // delete all the manifests that should not be in the cluster. if err = r.deleteStaleManifest(ctx, staleRes); err != nil { klog.ErrorS(err, "resource garbage-collection incomplete; some Work owned resources could not be deleted", work.Kind, kLogObjRef) // we can't proceed to update the applied return ctrl.Result{}, err - } else if len(staleRes) > 0 && err == nil { - // TODO: Specify which manifest was deleted. - msg := "successfully garbage-collected all stale manifests" - klog.InfoS(msg, work.Kind, kLogObjRef) - r.recorder.Event(work, v1.EventTypeNormal, "ResourceGarbageCollectionComplete", msg) + } else if len(staleRes) > 0 { + klog.V(3).InfoS("successfully garbage-collected all stale manifests", work.Kind, kLogObjRef, "number of GCed res", len(staleRes)) + for _, res := range staleRes { + klog.V(5).InfoS("successfully garbage-collected a stale manifest", work.Kind, kLogObjRef, "res", res) + } } - // update the appliedWork with the new work + // update the appliedWork with the new work after the stales are deleted appliedWork.Status.AppliedResources = newRes if err = r.spokeClient.Status().Update(ctx, appliedWork, &client.UpdateOptions{}); err != nil { klog.ErrorS(err, "failed to update appliedWork status", appliedWork.Kind, appliedWork.GetName()) @@ -103,13 +105,13 @@ func (r *WorkStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } -// calculateNewAppliedWork check the difference between what is supposed to be applied (tracked by the work CR status) +// generateDiff check the difference between what is supposed to be applied (tracked by the work CR status) // and what was applied in the member cluster (tracked by the appliedWork CR). 
// What is in the `appliedWork` but not in the `work` should be deleted from the member cluster // What is in the `work` but not in the `appliedWork` should be added to the appliedWork status -func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appliedWork *workapi.AppliedWork) ([]workapi.AppliedResourceMeta, []workapi.AppliedResourceMeta) { +func (r *WorkStatusReconciler) generateDiff(ctx context.Context, work *workapi.Work, appliedWork *workapi.AppliedWork) ([]workapi.AppliedResourceMeta, []workapi.AppliedResourceMeta, error) { var staleRes, newRes []workapi.AppliedResourceMeta - + // for every resource applied in cluster, check if it's still in the work's manifest condition for _, resourceMeta := range appliedWork.Status.AppliedResources { resStillExist := false for _, manifestCond := range work.Status.ManifestConditions { @@ -119,15 +121,16 @@ func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appli } } if !resStillExist { - klog.V(3).InfoS("find an orphaned resource in the member cluster", + klog.V(5).InfoS("find an orphaned resource in the member cluster", "parent resource", work.GetName(), "orphaned resource", resourceMeta.ResourceIdentifier) staleRes = append(staleRes, resourceMeta) } } - + // add every resource in the work's manifest condition that is applied successfully back to the appliedWork status for _, manifestCond := range work.Status.ManifestConditions { ac := meta.FindStatusCondition(manifestCond.Conditions, ConditionTypeApplied) if ac == nil { + // should not happen klog.ErrorS(fmt.Errorf("resource is missing applied condition"), "applied condition missing", "resource", manifestCond.Identifier) continue } @@ -145,14 +148,28 @@ func (r *WorkStatusReconciler) calculateNewAppliedWork(work *workapi.Work, appli if !resRecorded { klog.V(5).InfoS("discovered a new resource", "parent Work", work.GetName(), "discovered resource", manifestCond.Identifier) + obj, err := r.spokeDynamicClient.Resource(schema.GroupVersionResource{ + Group: manifestCond.Identifier.Group, + Version: manifestCond.Identifier.Version, + Resource: manifestCond.Identifier.Resource, + }).Namespace(manifestCond.Identifier.Namespace).Get(ctx, manifestCond.Identifier.Name, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + klog.V(4).InfoS("the manifest resource is deleted", "manifest", manifestCond.Identifier) + continue + case err != nil: + klog.ErrorS(err, "failed to retrieve the manifest", "manifest", manifestCond.Identifier) + return nil, nil, err + } newRes = append(newRes, workapi.AppliedResourceMeta{ ResourceIdentifier: manifestCond.Identifier, + UID: obj.GetUID(), }) } } } - return newRes, staleRes + return newRes, staleRes, nil } func (r *WorkStatusReconciler) deleteStaleManifest(ctx context.Context, staleManifests []workapi.AppliedResourceMeta) error { @@ -191,7 +208,7 @@ func (r *WorkStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -// We only need to process the update event +// UpdateOnlyPredicate is used since we only need to process the update event type UpdateOnlyPredicate struct { predicate.Funcs } @@ -203,3 +220,14 @@ func (UpdateOnlyPredicate) Create(event.CreateEvent) bool { func (UpdateOnlyPredicate) Delete(event.DeleteEvent) bool { return false } + +func (UpdateOnlyPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + return false + } + if e.ObjectNew == nil { + return false + } + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() +} diff --git 
a/pkg/controllers/work_status_controller_unit_test.go b/pkg/controllers/work_status_controller_test.go similarity index 91% rename from pkg/controllers/work_status_controller_unit_test.go rename to pkg/controllers/work_status_controller_test.go index 49534cc4..b7954943 100644 --- a/pkg/controllers/work_status_controller_unit_test.go +++ b/pkg/controllers/work_status_controller_test.go @@ -30,16 +30,17 @@ func TestCalculateNewAppliedWork(t *testing.T) { inputAppliedWork v1alpha1.AppliedWork expectedNewRes []v1alpha1.AppliedResourceMeta expectedStaleRes []v1alpha1.AppliedResourceMeta + hasErr bool }{ "AppliedWork and Work has been garbage collected; AppliedWork and Work of a resource both does not exist": { - r: WorkStatusReconciler{Joined: true}, + r: WorkStatusReconciler{joined: true}, inputWork: inputWork, inputAppliedWork: inputAppliedWork, expectedNewRes: []v1alpha1.AppliedResourceMeta(nil), expectedStaleRes: []v1alpha1.AppliedResourceMeta(nil), }, "AppliedWork and Work of a resource exists; there are nothing being deleted": { - r: WorkStatusReconciler{Joined: true}, + r: WorkStatusReconciler{joined: true}, inputWork: inputWorkWithResourceIdentifier, inputAppliedWork: inputAppliedWorkWithResourceIdentifier, expectedNewRes: []v1alpha1.AppliedResourceMeta{ @@ -51,7 +52,7 @@ func TestCalculateNewAppliedWork(t *testing.T) { expectedStaleRes: []v1alpha1.AppliedResourceMeta(nil), }, "Work resource has been deleted, but the corresponding AppliedWork remains": { - r: WorkStatusReconciler{Joined: true}, + r: WorkStatusReconciler{joined: true}, inputWork: inputWork, inputAppliedWork: inputAppliedWorkWithResourceIdentifier, expectedNewRes: []v1alpha1.AppliedResourceMeta(nil), @@ -63,7 +64,7 @@ func TestCalculateNewAppliedWork(t *testing.T) { }, }, "Work resource contains the status of a resource that does not exist within the AppliedWork resource.": { - r: WorkStatusReconciler{Joined: true}, + r: WorkStatusReconciler{joined: true}, inputWork: inputWorkWithResourceIdentifier, inputAppliedWork: inputAppliedWork, expectedNewRes: []v1alpha1.AppliedResourceMeta{ @@ -76,9 +77,12 @@ func TestCalculateNewAppliedWork(t *testing.T) { } for testName, tt := range tests { t.Run(testName, func(t *testing.T) { - newRes, staleRes := tt.r.calculateNewAppliedWork(&tt.inputWork, &tt.inputAppliedWork) + newRes, staleRes, err := tt.r.generateDiff(context.Background(), &tt.inputWork, &tt.inputAppliedWork) assert.Equalf(t, tt.expectedNewRes, newRes, "Testcase %s: NewRes is different from what it should be.", testName) assert.Equalf(t, tt.expectedStaleRes, staleRes, "Testcase %s: StaleRes is different from what it should be.", testName) + if tt.hasErr { + assert.Truef(t, err != nil, "Testcase %s: Should get an err.", testName) + } }) } } @@ -91,7 +95,7 @@ func TestStop(t *testing.T) { }{ "controller is being stopped": { reconciler: WorkStatusReconciler{ - Joined: false, + joined: false, }, ctrlResult: ctrl.Result{RequeueAfter: time.Second * 5}, wantErr: nil, diff --git a/pkg/utils/common.go b/pkg/utils/common.go new file mode 100644 index 00000000..c7bddf4f --- /dev/null +++ b/pkg/utils/common.go @@ -0,0 +1,42 @@ +package utils + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// UpsertOwnerRef create or inserts the owner reference to the object +func UpsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) + } else { + owners[idx] = 
ref + } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. +func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if ReferSameObject(r, ref) { + return index + } + } + return -1 +} + +// ReferSameObject returns true if a and b point to the same object. +func ReferSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name +} diff --git a/pkg/utils/test_utils.go b/pkg/utils/test_utils.go index 102e67d1..406e9c89 100644 --- a/pkg/utils/test_utils.go +++ b/pkg/utils/test_utils.go @@ -3,8 +3,6 @@ package utils import ( "github.com/onsi/gomega/format" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/record" ) @@ -37,38 +35,3 @@ func (matcher AlreadyExistMatcher) FailureMessage(actual interface{}) (message s func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to be already exist") } - -func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { - owners := object.GetOwnerReferences() - if idx := indexOwnerRef(owners, ref); idx == -1 { - owners = append(owners, ref) - } else { - owners[idx] = ref - } - object.SetOwnerReferences(owners) -} - -// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. -func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { - for index, r := range ownerReferences { - if referSameObject(r, ref) { - return index - } - } - return -1 -} - -// Returns true if a and b point to the same object. -func referSameObject(a, b metav1.OwnerReference) bool { - aGV, err := schema.ParseGroupVersion(a.APIVersion) - if err != nil { - return false - } - - bGV, err := schema.ParseGroupVersion(b.APIVersion) - if err != nil { - return false - } - - return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name -} From 8aee597e81e4ab8044e400c00a8bc823f4938c92 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 17 Aug 2022 17:31:31 -0700 Subject: [PATCH 3/7] fix test --- pkg/controllers/apply_controller.go | 16 +++-- pkg/controllers/apply_controller_test.go | 83 +++++++++--------------- 2 files changed, 42 insertions(+), 57 deletions(-) diff --git a/pkg/controllers/apply_controller.go b/pkg/controllers/apply_controller.go index c003a33d..abfd056f 100644 --- a/pkg/controllers/apply_controller.go +++ b/pkg/controllers/apply_controller.go @@ -370,7 +370,8 @@ func (r *ApplyWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { // we have modified. 
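// A minimal usage sketch (hedged; the names are taken from the surrounding hunks, not new API):
// the hash stored under manifestHashAnnotation is compared with a freshly computed one to decide
// whether the live object needs to be patched.
//
//	curHash := curObj.GetAnnotations()[manifestHashAnnotation]
//	newHash, _ := computeManifestHash(manifestObj)
//	if curHash != newHash {
//		// the manifest has drifted from what was last applied; re-apply it
//	}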
func computeManifestHash(obj *unstructured.Unstructured) (string, error) { manifest := obj.DeepCopy() - // strip all the appliedWork owner fields + // strip all the appliedWork owner fields just in case + // those fields should not exist in the manifest curRefs := manifest.GetOwnerReferences() var newRefs []metav1.OwnerReference for _, ownerRef := range curRefs { @@ -388,8 +389,15 @@ func computeManifestHash(obj *unstructured.Unstructured) (string, error) { manifest.SetAnnotations(annotation) } // strip the status just in case + manifest.SetResourceVersion("") + manifest.SetGeneration(0) + manifest.SetUID("") + manifest.SetSelfLink("") + manifest.SetDeletionTimestamp(nil) + manifest.SetManagedFields(nil) + unstructured.RemoveNestedField(manifest.Object, "metadata", "creationTimestamp") + unstructured.RemoveNestedField(manifest.Object, "status") data := manifest.Object - delete(data, "status") // compute the sha256 hash of the remaining data jsonBytes, err := json.Marshal(data) if err != nil { @@ -437,7 +445,7 @@ func findManifestConditionByIdentifier(identifier workv1alpha1.ResourceIdentifie // setManifestHashAnnotation computes the hash of the provided manifest and sets an annotation of the // hash on the provided unstructured object. func setManifestHashAnnotation(manifestObj *unstructured.Unstructured) error { - specHash, err := computeManifestHash(manifestObj) + manifestHash, err := computeManifestHash(manifestObj) if err != nil { return err } @@ -446,7 +454,7 @@ func setManifestHashAnnotation(manifestObj *unstructured.Unstructured) error { if annotation == nil { annotation = map[string]string{} } - annotation[manifestHashAnnotation] = specHash + annotation[manifestHashAnnotation] = manifestHash manifestObj.SetAnnotations(annotation) return nil } diff --git a/pkg/controllers/apply_controller_test.go b/pkg/controllers/apply_controller_test.go index 19ef3ae3..2e62a2c2 100644 --- a/pkg/controllers/apply_controller_test.go +++ b/pkg/controllers/apply_controller_test.go @@ -671,12 +671,6 @@ func TestReconcile(t *testing.T) { func TestSetManifestHashAnnotation(t *testing.T) { // basic setup - appliedOwnerRef := metav1.OwnerReference{ - APIVersion: workv1alpha1.GroupVersion.String(), - Kind: workv1alpha1.AppliedWorkKind, - Name: utilrand.String(10), - UID: types.UID(utilrand.String(10)), - } manifestObj := appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "Deployment", @@ -697,56 +691,41 @@ func TestSetManifestHashAnnotation(t *testing.T) { }, }, } - // the normal case that the curObj has an annotation and owner reference with status - curObj := manifestObj.DeepCopy() - curObj.Annotations[manifestHashAnnotation] = utilrand.String(10) - curObj.OwnerReferences = append(curObj.OwnerReferences, appliedOwnerRef) - curObj.Status.Replicas = 10 - curObj.Status.ReadyReplicas = 1 + // pre-compute the hash + preObj := manifestObj.DeepCopy() + var uPreObj unstructured.Unstructured + uPreObj.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(preObj) + preHash, _ := computeManifestHash(&uPreObj) tests := map[string]struct { manifestObj interface{} - curObj interface{} isSame bool }{ - "the normal case after an object is applied": { - manifestObj: &manifestObj, - curObj: curObj, - isSame: true, - }, - "current is owned by multiple appliedWorks": { - manifestObj: &manifestObj, - curObj: func() *appsv1.Deployment { - extraObj := curObj.DeepCopy() - extraObj.OwnerReferences = append(extraObj.OwnerReferences, appliedOwnerRef) - return extraObj - }(), - isSame: true, - }, - "current status 
changed": { - manifestObj: &manifestObj, - curObj: func() *appsv1.Deployment { - extraObj := curObj.DeepCopy() + "manifest status changed": { + manifestObj: func() *appsv1.Deployment { + extraObj := manifestObj.DeepCopy() extraObj.Status.ReadyReplicas = 10 return extraObj }(), isSame: true, }, - "current's appliedWork owner is removed by the user": { - manifestObj: &manifestObj, - curObj: func() *appsv1.Deployment { - noObj := curObj.DeepCopy() - noObj.SetOwnerReferences(manifestObj.GetOwnerReferences()) - return noObj + "manifest's has hashAnnotation": { + manifestObj: func() *appsv1.Deployment { + alterObj := manifestObj.DeepCopy() + alterObj.Annotations[manifestHashAnnotation] = utilrand.String(10) + return alterObj }(), isSame: true, }, - "current's hash annotation is changed by the user": { - manifestObj: &manifestObj, - curObj: func() *appsv1.Deployment { - alterObj := curObj.DeepCopy() - alterObj.Annotations[manifestHashAnnotation] = utilrand.String(10) - return alterObj + "manifest has extra metadata": { + manifestObj: func() *appsv1.Deployment { + noObj := manifestObj.DeepCopy() + noObj.SetSelfLink(utilrand.String(2)) + noObj.SetResourceVersion(utilrand.String(4)) + noObj.SetGeneration(3) + noObj.SetUID(types.UID(utilrand.String(3))) + noObj.SetCreationTimestamp(metav1.Now()) + return noObj }(), isSame: true, }, @@ -756,7 +735,6 @@ func TestSetManifestHashAnnotation(t *testing.T) { alterObj.OwnerReferences[0].APIVersion = utilrand.String(10) return alterObj }(), - curObj: curObj, isSame: false, }, "manifest has a different label": { @@ -765,7 +743,6 @@ func TestSetManifestHashAnnotation(t *testing.T) { alterObj.SetLabels(map[string]string{utilrand.String(5): utilrand.String(10)}) return alterObj }(), - curObj: curObj, isSame: false, }, "manifest has a different annotation": { @@ -774,7 +751,6 @@ func TestSetManifestHashAnnotation(t *testing.T) { alterObj.SetAnnotations(map[string]string{utilrand.String(5): utilrand.String(10)}) return alterObj }(), - curObj: curObj, isSame: false, }, "manifest has a different spec": { @@ -783,20 +759,21 @@ func TestSetManifestHashAnnotation(t *testing.T) { alterObj.Spec.Replicas = pointer.Int32Ptr(100) return alterObj }(), - curObj: curObj, isSame: false, }, } for name, tt := range tests { t.Run(name, func(t *testing.T) { - var uCurObj, uManifestObj unstructured.Unstructured + var uManifestObj unstructured.Unstructured uManifestObj.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(tt.manifestObj) - uCurObj.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(tt.curObj) - setManifestHashAnnotation(&uManifestObj) - setManifestHashAnnotation(&uCurObj) + err := setManifestHashAnnotation(&uManifestObj) + if err != nil { + t.Error("failed to marshall the manifest", err.Error()) + } manifestHash := uManifestObj.GetAnnotations()[manifestHashAnnotation] - curHash := uCurObj.GetAnnotations()[manifestHashAnnotation] - assert.Equalf(t, tt.isSame, manifestHash == curHash, "manifestObj = (%v), curObj = (%+v) ", tt.manifestObj, tt.curObj) + if tt.isSame != (manifestHash == preHash) { + t.Errorf("testcase %s failed: manifestObj = (%+v)", name, tt.manifestObj) + } }) } } From 014b73f298cd4ff1e37d350c42e42ec5ec2d668e Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Fri, 19 Aug 2022 16:19:41 -0700 Subject: [PATCH 4/7] address comments --- README.md | 2 +- pkg/controllers/appliedWork_controller.go | 119 --------------- pkg/controllers/apply_controller.go | 15 +- pkg/controllers/apply_controller_test.go | 24 ++- 
pkg/controllers/finalize_controller.go | 139 ------------------ .../finalize_controller_integration_test.go | 126 ---------------- pkg/controllers/finalize_controller_test.go | 125 ---------------- pkg/controllers/resource_tracker.go | 100 ------------- pkg/controllers/work_status_controller.go | 90 ++++++++++-- .../work_status_controller_test.go | 16 ++ pkg/utils/common.go | 20 ++- pkg/utils/test_utils.go | 16 ++ tests/e2e/apply_test.go | 10 +- 13 files changed, 157 insertions(+), 645 deletions(-) delete mode 100644 pkg/controllers/appliedWork_controller.go delete mode 100644 pkg/controllers/finalize_controller.go delete mode 100644 pkg/controllers/finalize_controller_integration_test.go delete mode 100644 pkg/controllers/finalize_controller_test.go delete mode 100644 pkg/controllers/resource_tracker.go diff --git a/README.md b/README.md index 9189bc86..93fab48c 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ kubectl apply -k deploy ```shell kubectl delete secret hub-kubeconfig-secret -n fleet-system kubectl create secret generic hub-kubeconfig-secret --from-file=kubeconfig=/Users/ryanzhang/.kube/hub -n fleet-system -go run cmd/workcontroller/workcontroller.go --work-namespace=default --hub-kubeconfig-secret=hub-kubeconfig-secret +go run cmd/workcontroller/workcontroller.go --work-namespace=default --hub-kubeconfig-secret=hub-kubeconfig-secret -v 5 -add_dir_header ``` diff --git a/pkg/controllers/appliedWork_controller.go b/pkg/controllers/appliedWork_controller.go deleted file mode 100644 index a4f39c4d..00000000 --- a/pkg/controllers/appliedWork_controller.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" -) - -// AppliedWorkReconciler reconciles an AppliedWork object -type AppliedWorkReconciler struct { - appliedResourceTracker - clusterNameSpace string -} - -// Reconcile implement the control loop logic for AppliedWork object. 
-func (r *AppliedWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.InfoS("applied work reconcile loop triggered", "item", req.NamespacedName) - nsWorkName := req.NamespacedName - nsWorkName.Namespace = r.clusterNameSpace - _, appliedWork, err := r.fetchWorks(ctx, nsWorkName) - if err != nil { - return ctrl.Result{}, err - } - // work has been garbage collected, stop the periodic check if it's gone - if appliedWork == nil { - return ctrl.Result{}, nil - } - - _, err = r.collectDisappearedWorks(ctx, appliedWork) - if err != nil { - klog.ErrorS(err, "failed to delete all the stale work", "work", req.NamespacedName) - // we can't proceed to update the applied - return ctrl.Result{}, err - } - - // we want to periodically check if what we've applied matches what is recorded - return ctrl.Result{RequeueAfter: time.Minute}, nil -} - -// collectDisappearedWorks returns the list of resource that does not exist in the appliedWork -func (r *AppliedWorkReconciler) collectDisappearedWorks( - ctx context.Context, appliedWork *workapi.AppliedWork) ([]workapi.AppliedResourceMeta, error) { - var errs []error - var disappearedWorks, newRes []workapi.AppliedResourceMeta - workUIDChanged := false - for _, resourceMeta := range appliedWork.Status.AppliedResources { - gvr := schema.GroupVersionResource{ - Group: resourceMeta.Group, - Version: resourceMeta.Version, - Resource: resourceMeta.Resource, - } - obj, err := r.spokeDynamicClient.Resource(gvr).Namespace(resourceMeta.Namespace).Get(ctx, resourceMeta.Name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - klog.InfoS("found a disappeared work", "work", resourceMeta) - disappearedWorks = append(disappearedWorks, resourceMeta) - } else { - klog.ErrorS(err, "failed to get a work", "work", resourceMeta) - errs = append(errs, err) - } - } else { - if resourceMeta.UID != obj.GetUID() { - workUIDChanged = true - if len(resourceMeta.UID) != 0 { - klog.InfoS("found a re-created work", "work", - resourceMeta, "old UID", resourceMeta.UID, "new UID", obj.GetUID()) - } else { - klog.InfoS("attach to a newly created work", "work", - resourceMeta, "new UID", obj.GetUID()) - } - } - // set the UID back - resourceMeta.UID = obj.GetUID() - newRes = append(newRes, resourceMeta) - } - } - - if workUIDChanged && len(errs) == 0 { - appliedWork.Status.AppliedResources = newRes - klog.InfoS("Update an appliedWork status with new object UID", "work", appliedWork.GetName()) - if err := r.spokeClient.Status().Update(ctx, appliedWork, &client.UpdateOptions{}); err != nil { - klog.ErrorS(err, "update appliedWork status failed", "appliedWork", appliedWork.GetName()) - return disappearedWorks, err - } - } - - return disappearedWorks, utilerrors.NewAggregate(errs) - -} - -// SetupWithManager wires up the controller. 
-func (r *AppliedWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr).For(&workapi.AppliedWork{}).Complete(r) -} diff --git a/pkg/controllers/apply_controller.go b/pkg/controllers/apply_controller.go index abfd056f..6acfaf69 100644 --- a/pkg/controllers/apply_controller.go +++ b/pkg/controllers/apply_controller.go @@ -108,7 +108,7 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return r.garbageCollectAppliedWork(ctx, work) } - // Get the appliedWork + // ensure that the appliedWork and the finalizer exist appliedWork, err := r.ensureAppliedWork(ctx, work) if err != nil { return ctrl.Result{}, err @@ -134,12 +134,12 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } if len(errs) != 0 { - klog.InfoS("manifest apply incomplete; the message is queued again for reconciliation", - "work", kLogObjRef, "err", utilerrors.NewAggregate(errs)) + klog.ErrorS(utilerrors.NewAggregate(errs), "manifest apply incomplete; the message is queued again for reconciliation", + "work", kLogObjRef) return ctrl.Result{}, utilerrors.NewAggregate(errs) } klog.InfoS("apply the work successfully ", "work", kLogObjRef) - r.recorder.Event(work, v1.EventTypeNormal, "ReconciliationComplete", "apply the work successfully") + r.recorder.Event(work, v1.EventTypeNormal, "ApplyWorkSucceed", "apply the work successfully") // we periodically reconcile the work to make sure the member cluster state is in sync with the work return ctrl.Result{RequeueAfter: time.Minute * 5}, nil } @@ -224,7 +224,7 @@ func (r *ApplyWorkReconciler) applyManifests(ctx context.Context, manifests []wo if err != nil { result.err = err } else { - utils.UpsertOwnerRef(owner, rawObj) + utils.AddOwnerRef(owner, rawObj) appliedObj, result.updated, result.err = r.applyUnstructured(ctx, gvr, rawObj) result.identifier = buildResourceIdentifier(index, rawObj, gvr) kLogObjRef := klog.ObjectRef{ @@ -388,7 +388,7 @@ func computeManifestHash(obj *unstructured.Unstructured) (string, error) { delete(annotation, manifestHashAnnotation) manifest.SetAnnotations(annotation) } - // strip the status just in case + // strip the live object related fields just in case manifest.SetResourceVersion("") manifest.SetGeneration(0) manifest.SetUID("") @@ -397,9 +397,8 @@ func computeManifestHash(obj *unstructured.Unstructured) (string, error) { manifest.SetManagedFields(nil) unstructured.RemoveNestedField(manifest.Object, "metadata", "creationTimestamp") unstructured.RemoveNestedField(manifest.Object, "status") - data := manifest.Object // compute the sha256 hash of the remaining data - jsonBytes, err := json.Marshal(data) + jsonBytes, err := json.Marshal(manifest) if err != nil { return "", err } diff --git a/pkg/controllers/apply_controller_test.go b/pkg/controllers/apply_controller_test.go index 2e62a2c2..a7c90912 100644 --- a/pkg/controllers/apply_controller_test.go +++ b/pkg/controllers/apply_controller_test.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( @@ -208,9 +224,11 @@ func TestApplyUnstructured(t *testing.T) { testDeploymentDiffSpec := testDeployment.DeepCopy() testDeploymentDiffSpec.Spec.MinReadySeconds = 0 rawDiffSpec, _ := json.Marshal(testDeploymentDiffSpec) - testManifestDiffSpec := workv1alpha1.Manifest{RawExtension: runtime.RawExtension{ - Raw: rawDiffSpec, - }} + testManifestDiffSpec := workv1alpha1.Manifest{ + RawExtension: runtime.RawExtension{ + Raw: rawDiffSpec, + }, + } diffSpecObj, diffSpecDynamicClient, diffSpecHash := createObjAndDynamicClient(testManifestDiffSpec.Raw) patchFailClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) diff --git a/pkg/controllers/finalize_controller.go b/pkg/controllers/finalize_controller.go deleted file mode 100644 index 10789afb..00000000 --- a/pkg/controllers/finalize_controller.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" -) - -// FinalizeWorkReconciler reconciles a Work object for finalization -type FinalizeWorkReconciler struct { - client client.Client - spokeClient client.Client - recorder record.EventRecorder - Joined bool -} - -func NewFinalizeWorkReconciler(hubClient client.Client, spokeClient client.Client, recorder record.EventRecorder, joined bool) *FinalizeWorkReconciler { - return &FinalizeWorkReconciler{ - client: hubClient, - spokeClient: spokeClient, - recorder: recorder, - Joined: joined, - } -} - -// Reconcile implement the control loop logic for finalizing Work object. 
-func (r *FinalizeWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - if !r.Joined { - klog.InfoS("finalize controller is not started yet", "work", req.NamespacedName) - return ctrl.Result{RequeueAfter: time.Second * 5}, nil - } - klog.InfoS("Work finalize controller reconcile loop triggered.", "work", req.NamespacedName) - - work := &workv1alpha1.Work{} - err := r.client.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, work) - switch { - case errors.IsNotFound(err): - return ctrl.Result{}, nil - case err != nil: - klog.ErrorS(err, "fail to retrieve the work resource", "work", req.NamespacedName) - return ctrl.Result{}, err - } - - kLogObjRef := klog.KObj(work) - - // cleanup finalizer and resources - if !work.DeletionTimestamp.IsZero() { - return r.garbageCollectAppliedWork(ctx, work) - } - - appliedWork := &workv1alpha1.AppliedWork{} - if controllerutil.ContainsFinalizer(work, workFinalizer) { - err = r.spokeClient.Get(ctx, req.NamespacedName, appliedWork) - switch { - case errors.IsNotFound(err): - klog.ErrorS(err, "appliedWork finalizer resource does not exist yet, it will be created", "AppliedWork", kLogObjRef.Name) - case err != nil: - klog.ErrorS(err, "failed to retrieve the appliedWork", "AppliedWork", kLogObjRef.Name) - return ctrl.Result{}, err - } - } - - klog.InfoS("create the appliedWork resource", "AppliedWork", kLogObjRef.Name) - appliedWork = &workv1alpha1.AppliedWork{ - ObjectMeta: metav1.ObjectMeta{ - Name: req.Name, - }, - Spec: workv1alpha1.AppliedWorkSpec{ - WorkName: req.Name, - WorkNamespace: req.Namespace, - }, - } - err = r.spokeClient.Create(ctx, appliedWork) - if err != nil && !errors.IsAlreadyExists(err) { - // if this conflicts, we'll simply try again later - klog.ErrorS(err, "appliedWork create failed", "AppliedWork", kLogObjRef.Name) - return ctrl.Result{}, err - } - work.Finalizers = append(work.Finalizers, workFinalizer) - - return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) -} - -// garbageCollectAppliedWork deletes the applied work -func (r *FinalizeWorkReconciler) garbageCollectAppliedWork(ctx context.Context, work *workv1alpha1.Work) (ctrl.Result, error) { - if controllerutil.ContainsFinalizer(work, workFinalizer) { - deletePolicy := metav1.DeletePropagationForeground - appliedWork := workv1alpha1.AppliedWork{} - err := r.spokeClient.Get(ctx, types.NamespacedName{Name: work.Name}, &appliedWork) - if err != nil { - klog.ErrorS(err, "failed to retrieve the appliedWork", "AppliedWork", work.Name) - return ctrl.Result{}, err - } - err = r.spokeClient.Delete(ctx, &appliedWork, &client.DeleteOptions{PropagationPolicy: &deletePolicy}) - if err != nil { - klog.ErrorS(err, "failed to delete the appliedWork", "AppliedWork", work.Name) - return ctrl.Result{}, err - } - klog.InfoS("successfully deleted the appliedWork", "AppliedWork", work.Name) - - controllerutil.RemoveFinalizer(work, workFinalizer) - } - - return ctrl.Result{}, r.client.Update(ctx, work, &client.UpdateOptions{}) -} - -// SetupWithManager wires up the controller. 
-func (r *FinalizeWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr).For(&workv1alpha1.Work{}, - builder.WithPredicates(predicate.GenerationChangedPredicate{})).Complete(r) -} diff --git a/pkg/controllers/finalize_controller_integration_test.go b/pkg/controllers/finalize_controller_integration_test.go deleted file mode 100644 index ab11b735..00000000 --- a/pkg/controllers/finalize_controller_integration_test.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilrand "k8s.io/apimachinery/pkg/util/rand" -) - -var _ = Describe("Garbage Collected", func() { - var resourceName string - var resourceNamespace string - var workName string - var workNamespace string - - const timeout = time.Second * 30 - const interval = time.Second * 1 - - // BeforeEach test ensure: - // #1 - A namespace exists for the Work to reside within. - // #2 - A namespace exists for where the manifest object would be created within. - // #3 - A manifest of some type should be within the Work object. - BeforeEach(func() { - workName = "wn-" + utilrand.String(5) - workNamespace = "wns-" + utilrand.String(5) - resourceName = "rn-" + utilrand.String(5) - resourceNamespace = "rns" + utilrand.String(5) - - wns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: workNamespace, - }, - } - _, err := k8sClient.CoreV1().Namespaces().Create(context.Background(), wns, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - rns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceNamespace, - }, - } - _, err = k8sClient.CoreV1().Namespaces().Create(context.Background(), rns, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - // Create the Work object with some type of Manifest resource. 
- cm := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: resourceNamespace, - }, - Data: map[string]string{ - "test": "test", - }, - } - - work := &workv1alpha1.Work{ - ObjectMeta: metav1.ObjectMeta{ - Name: workName, - Namespace: workNamespace, - }, - Spec: workv1alpha1.WorkSpec{ - Workload: workv1alpha1.WorkloadTemplate{ - Manifests: []workv1alpha1.Manifest{ - { - RawExtension: runtime.RawExtension{Object: cm}, - }, - }, - }, - }, - } - - createWorkErr := workClient.Create(context.Background(), work) - Expect(createWorkErr).ToNot(HaveOccurred()) - }) - - // AfterEach test ensure: - AfterEach(func() { - // Add any teardown steps that needs to be executed after each test - err := k8sClient.CoreV1().Namespaces().Delete(context.Background(), workNamespace, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) - }) - - Context("A Work object with manifests has been created.", func() { - It("Should have created an AppliedWork object", func() { - Eventually(func() bool { - appliedWorkObject := workv1alpha1.AppliedWork{} - err := workClient.Get(context.Background(), types.NamespacedName{ - Namespace: workNamespace, - Name: workName, - }, &appliedWorkObject) - if err == nil { - return appliedWorkObject.Spec.WorkName == workName - } - return false - }, timeout, interval).Should(BeTrue()) - }) - }) -}) diff --git a/pkg/controllers/finalize_controller_test.go b/pkg/controllers/finalize_controller_test.go deleted file mode 100644 index 981b2277..00000000 --- a/pkg/controllers/finalize_controller_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package controllers - -import ( - "context" - "testing" - "time" - - "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/stretchr/testify/assert" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/work-api/pkg/utils" - - workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" -) - -// TestWrapper is a struct used to encapsulate the mocked dependencies needed to simulate a specific flow in logic. 
-type TestWrapper struct { - mockAppliedWork *workv1alpha1.AppliedWork - mockReconciler *FinalizeWorkReconciler - mockWork *workv1alpha1.Work -} - -func TestGarbageCollectAppliedWork(t *testing.T) { - - // The happy path constitutes a Work object with a finalizer & an associated AppliedWork object - happyPathTestWrapper := generateTestWrapper() - controllerutil.AddFinalizer(happyPathTestWrapper.mockWork, workFinalizer) - - tests := map[string]struct { - r FinalizeWorkReconciler - tw TestWrapper - expectedResult ctrl.Result - expectedError error - }{ - "Happy Path: AppliedWork deleted, work finalizer removed": { - r: *happyPathTestWrapper.mockReconciler, - tw: *happyPathTestWrapper, - }, - } - for testName, tt := range tests { - t.Run(testName, func(t *testing.T) { - _, err := tt.r.garbageCollectAppliedWork(context.Background(), tt.tw.mockWork) - - assert.False(t, controllerutil.ContainsFinalizer(tt.tw.mockWork, workFinalizer), "The Work object still contains a finalizer, it should not.") - assert.NoError(t, err, "An error occurred but none was expected.") - }) - } -} - -func TestFinalizerReconcile(t *testing.T) { - - tests := map[string]struct { - r FinalizeWorkReconciler - req ctrl.Request - expectedResult ctrl.Result - expectedError error - }{ - "Controller not joined": { - r: FinalizeWorkReconciler{Joined: false}, - req: ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: "work" + rand.String(5), - Name: "work" + rand.String(5), - }, - }, - expectedResult: ctrl.Result{RequeueAfter: time.Second * 5}, - expectedError: nil, - }, - } - for testName, tt := range tests { - t.Run(testName, func(t *testing.T) { - ctrlResult, err := tt.r.Reconcile(ctx, tt.req) - assert.Equalf(t, tt.expectedResult, ctrlResult, "wrong ctrlResult for testcase %s", testName) - assert.Equal(t, tt.expectedError, err) - }) - } -} - -func generateWork(workName string, workNamespace string) *workv1alpha1.Work { - return &workv1alpha1.Work{ - ObjectMeta: metaV1.ObjectMeta{ - Name: workName, - Namespace: workNamespace, - }, - Spec: workv1alpha1.WorkSpec{ - Workload: workv1alpha1.WorkloadTemplate{ - Manifests: []workv1alpha1.Manifest{}, - }, - }, - } -} - -func generateAppliedWork(workName string) *workv1alpha1.AppliedWork { - return &workv1alpha1.AppliedWork{ - ObjectMeta: metaV1.ObjectMeta{ - Name: workName, - }, - Spec: workv1alpha1.AppliedWorkSpec{ - WorkName: workName, - }, - } -} - -func generateTestWrapper() *TestWrapper { - workName := rand.String(5) - workNameSpace := rand.String(5) - - mockAppliedWork := generateAppliedWork(workName) - mockWork := generateWork(workName, workNameSpace) - - return &TestWrapper{ - mockReconciler: &FinalizeWorkReconciler{ - client: test.NewMockClient(), - recorder: utils.NewFakeRecorder(2), - spokeClient: test.NewMockClient(), - Joined: true, - }, - mockAppliedWork: mockAppliedWork, - mockWork: mockWork, - } -} diff --git a/pkg/controllers/resource_tracker.go b/pkg/controllers/resource_tracker.go deleted file mode 100644 index 055b2ea3..00000000 --- a/pkg/controllers/resource_tracker.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - - workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" -) - -// TODO: merge this back to the work status controller -type appliedResourceTracker struct { - hubClient client.Client - spokeClient client.Client - spokeDynamicClient dynamic.Interface - restMapper meta.RESTMapper -} - -// Reconcile the difference between the work status/appliedWork status/what is on the member cluster -// work.status represents what should be on the member cluster (it cannot be empty, we will reject empty work) -// appliedWork.status represents what was on the member cluster (it's okay for it to be empty) -// Objects in the appliedWork.status but not in the work.status should be removed from the member cluster. -// We then go through all the work.status manifests whose condition is successfully applied -// For each of them, we check if the object exists in the member cluster, if not, we recreate it according to the original manifest -// We insert it into the new appliedWork.Status - -func (r *appliedResourceTracker) fetchWorks(ctx context.Context, nsWorkName types.NamespacedName) (*workapi.Work, *workapi.AppliedWork, error) { - work := &workapi.Work{} - appliedWork := &workapi.AppliedWork{} - - // fetch work CR from the hub cluster - err := r.hubClient.Get(ctx, nsWorkName, work) - switch { - case errors.IsNotFound(err): - klog.V(4).InfoS("work resource does not exist", "item", nsWorkName) - work = nil - case err != nil: - klog.ErrorS(err, "failed to get the work obj", "item", nsWorkName) - return nil, nil, err - default: - klog.V(5).InfoS("work exists in the hub cluster", "item", nsWorkName) - } - - // fetch appliedWork CR from the member cluster - err = r.spokeClient.Get(ctx, nsWorkName, appliedWork) - switch { - case errors.IsNotFound(err): - klog.V(4).InfoS("appliedWork obj does not exist", "item", nsWorkName) - appliedWork = nil - case err != nil: - klog.ErrorS(err, "failed to get appliedWork", "item", nsWorkName) - return nil, nil, err - default: - klog.V(5).InfoS("appliedWork exists in the member cluster", "item", nsWorkName) - } - - if err := checkConsistentExist(work, appliedWork, nsWorkName); err != nil { - klog.ErrorS(err, "applied/work object existence not consistent", "item", nsWorkName) - return nil, nil, err - } - - return work, appliedWork, nil -} - -func checkConsistentExist(work *workapi.Work, appliedWork *workapi.AppliedWork, workName types.NamespacedName) error { - // work already deleted - if work == nil && appliedWork != nil { - return fmt.Errorf("work finalizer didn't delete the appliedWork %s", workName) - } - // we are triggered by appliedWork change or work update so the appliedWork should already be here - if work != nil && appliedWork == nil { - return fmt.Errorf("work controller didn't create the appliedWork %s", workName) - } - if work == nil && appliedWork == nil { - klog.InfoS("both applied and work are garbage collected", 
"item", workName) - } - return nil -} diff --git a/pkg/controllers/work_status_controller.go b/pkg/controllers/work_status_controller.go index 40e3e8e8..c6e258f2 100644 --- a/pkg/controllers/work_status_controller.go +++ b/pkg/controllers/work_status_controller.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/record" @@ -37,25 +38,28 @@ import ( "time" ) +// TODO: merge this with the apply controller + // WorkStatusReconciler reconciles a Work object when its status changes type WorkStatusReconciler struct { - appliedResourceTracker - recorder record.EventRecorder - concurrency int - joined bool + hubClient client.Client + spokeClient client.Client + spokeDynamicClient dynamic.Interface + restMapper meta.RESTMapper + recorder record.EventRecorder + concurrency int + joined bool } func NewWorkStatusReconciler(hubClient client.Client, spokeDynamicClient dynamic.Interface, spokeClient client.Client, restMapper meta.RESTMapper, recorder record.EventRecorder, concurrency int, joined bool) *WorkStatusReconciler { return &WorkStatusReconciler{ - appliedResourceTracker: appliedResourceTracker{ - hubClient: hubClient, - spokeClient: spokeClient, - spokeDynamicClient: spokeDynamicClient, - restMapper: restMapper, - }, - recorder: recorder, - concurrency: concurrency, - joined: joined, + hubClient: hubClient, + spokeClient: spokeClient, + spokeDynamicClient: spokeDynamicClient, + restMapper: restMapper, + recorder: recorder, + concurrency: concurrency, + joined: joined, } } @@ -105,6 +109,51 @@ func (r *WorkStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } +// Reconcile the difference between the work status/appliedWork status/what is on the member cluster +// work.status represents what should be on the member cluster (it cannot be empty, we will reject empty work) +// appliedWork.status represents what was on the member cluster (it's okay for it to be empty) +// Objects in the appliedWork.status but not in the work.status should be removed from the member cluster. 
+// We then go through all the work.status manifests whose condition is successfully applied +// For each of them, we check if the object exists in the member cluster, if not, we recreate it according to the original manifest +// We insert it into the new appliedWork.Status +func (r *WorkStatusReconciler) fetchWorks(ctx context.Context, nsWorkName types.NamespacedName) (*workapi.Work, *workapi.AppliedWork, error) { + work := &workapi.Work{} + appliedWork := &workapi.AppliedWork{} + + // fetch work CR from the hub cluster + err := r.hubClient.Get(ctx, nsWorkName, work) + switch { + case apierrors.IsNotFound(err): + klog.V(4).InfoS("work resource does not exist", "item", nsWorkName) + work = nil + case err != nil: + klog.ErrorS(err, "failed to get the work obj", "item", nsWorkName) + return nil, nil, err + default: + klog.V(5).InfoS("work exists in the hub cluster", "item", nsWorkName) + } + + // fetch appliedWork CR from the member cluster + err = r.spokeClient.Get(ctx, nsWorkName, appliedWork) + switch { + case apierrors.IsNotFound(err): + klog.V(4).InfoS("appliedWork obj does not exist", "item", nsWorkName) + appliedWork = nil + case err != nil: + klog.ErrorS(err, "failed to get appliedWork", "item", nsWorkName) + return nil, nil, err + default: + klog.V(5).InfoS("appliedWork exists in the member cluster", "item", nsWorkName) + } + + if err := checkConsistentExist(work, appliedWork, nsWorkName); err != nil { + klog.ErrorS(err, "applied/work object existence not consistent", "item", nsWorkName) + return nil, nil, err + } + + return work, appliedWork, nil +} + // generateDiff check the difference between what is supposed to be applied (tracked by the work CR status) // and what was applied in the member cluster (tracked by the appliedWork CR). // What is in the `appliedWork` but not in the `work` should be deleted from the member cluster @@ -208,6 +257,21 @@ func (r *WorkStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } +func checkConsistentExist(work *workapi.Work, appliedWork *workapi.AppliedWork, workName types.NamespacedName) error { + // work already deleted + if work == nil && appliedWork != nil { + return fmt.Errorf("work finalizer didn't delete the appliedWork %s", workName) + } + // we are triggered by appliedWork change or work update so the appliedWork should already be here + if work != nil && appliedWork == nil { + return fmt.Errorf("work controller didn't create the appliedWork %s", workName) + } + if work == nil && appliedWork == nil { + klog.InfoS("both applied and work are garbage collected", "item", workName) + } + return nil +} + // UpdateOnlyPredicate is used since we only need to process the update event type UpdateOnlyPredicate struct { predicate.Funcs diff --git a/pkg/controllers/work_status_controller_test.go b/pkg/controllers/work_status_controller_test.go index b7954943..6f9005a1 100644 --- a/pkg/controllers/work_status_controller_test.go +++ b/pkg/controllers/work_status_controller_test.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( diff --git a/pkg/utils/common.go b/pkg/utils/common.go index c7bddf4f..e162f194 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package utils import ( @@ -5,8 +21,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// UpsertOwnerRef create or inserts the owner reference to the object -func UpsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { +// AddOwnerRef creates or inserts the owner reference to the object +func AddOwnerRef(ref metav1.OwnerReference, object metav1.Object) { owners := object.GetOwnerReferences() if idx := indexOwnerRef(owners, ref); idx == -1 { owners = append(owners, ref) diff --git a/pkg/utils/test_utils.go b/pkg/utils/test_utils.go index 406e9c89..7d79ca72 100644 --- a/pkg/utils/test_utils.go +++ b/pkg/utils/test_utils.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package utils import ( diff --git a/tests/e2e/apply_test.go b/tests/e2e/apply_test.go index 55d3c08b..09c0903a 100644 --- a/tests/e2e/apply_test.go +++ b/tests/e2e/apply_test.go @@ -96,7 +96,6 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { BeforeEach(func() { mDetails = generateManifestDetails(manifestFiles) - workObj := createWorkObj( getWorkName(5), defaultWorkNamespace, @@ -118,10 +117,8 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { Eventually(func() error { appliedWork := workapi.AppliedWork{} err := spokeClient.Get(context.Background(), types.NamespacedName{ - Namespace: createdWork.Namespace, - Name: createdWork.Name, + Name: createdWork.Name, }, &appliedWork) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -129,7 +126,6 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { Eventually(func() error { _, err := spokeKubeClient.AppsV1().Deployments(mDetails[0].ObjMeta.Namespace). Get(context.Background(), mDetails[0].ObjMeta.Name, metav1.GetOptions{}) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -137,7 +133,6 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { Eventually(func() error { _, err := spokeKubeClient.CoreV1().Services(mDetails[1].ObjMeta.Namespace). 
Get(context.Background(), mDetails[1].ObjMeta.Name, metav1.GetOptions{}) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -163,13 +158,11 @@ var WorkCreatedWithCRDContext = func(description string, manifestFiles []string) BeforeEach(func() { manifestDetails = generateManifestDetails(manifestFiles) - workObj := createWorkObj( getWorkName(5), defaultWorkNamespace, manifestDetails, ) - err = createWork(workObj) createdWork, err = retrieveWork(workObj.Namespace, workObj.Name) Expect(err).ToNot(HaveOccurred()) @@ -184,7 +177,6 @@ var WorkCreatedWithCRDContext = func(description string, manifestFiles []string) Eventually(func() error { By("verifying the CRD exists within the spoke") _, err = spokeApiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), manifestDetails[0].ObjMeta.Name, metav1.GetOptions{}) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) }) From 94de11ad1f7b8c2032e11579bf61a04b458e2838 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Fri, 19 Aug 2022 19:39:57 -0700 Subject: [PATCH 5/7] fix test --- Makefile | 1 - README.md | 18 ++- pkg/apis/v1alpha1/zz_generated.deepcopy.go | 2 +- pkg/controllers/apply_controller.go | 23 ++-- .../apply_controller_integratoin_test.go | 3 +- pkg/controllers/apply_controller_test.go | 45 ++++--- pkg/controllers/suite_test.go | 1 + pkg/controllers/work_status_controller.go | 4 +- pkg/utils/common.go | 12 ++ tests/e2e/apply_test.go | 110 +++++++++++------- 10 files changed, 140 insertions(+), 79 deletions(-) diff --git a/Makefile b/Makefile index ff2ee83e..95567206 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,6 @@ VERSION_KEY := sigs.k8s.io/work-api/version.VelaVersion GITVERSION_KEY := sigs.k8s.io/work-api/version.GitRevision LDFLAGS ?= "-s -w -X $(VERSION_KEY)=$(VERSION) -X $(GITVERSION_KEY)=$(GIT_COMMIT)" - GOHOSTOS ?=$(shell go env GOHOSTOS) GOHOSTARCH ?=$(shell go env GOHOSTARCH) K8S_VERSION ?=1.19.2 diff --git a/README.md b/README.md index 93fab48c..b0168bbf 100644 --- a/README.md +++ b/README.md @@ -65,13 +65,29 @@ rm hub-kubeconfig kubectl apply -k deploy ``` -### Run the controller against the Spoke cluster locally +### Run the controller against the Spoke cluster locally +Create a secret in the Spoke cluster that contains the kubconfig file pointing to the hub and run your code against. ```shell +kubectl create namespace fleet-system kubectl delete secret hub-kubeconfig-secret -n fleet-system kubectl create secret generic hub-kubeconfig-secret --from-file=kubeconfig=/Users/ryanzhang/.kube/hub -n fleet-system go run cmd/workcontroller/workcontroller.go --work-namespace=default --hub-kubeconfig-secret=hub-kubeconfig-secret -v 5 -add_dir_header ``` +### Run the e2e against a cluster locally +Create a secret in the cluster that contains the kubconfig file pointing to itself. Run the feature code against it. +```shell +kubectl create namespace fleet-system +kubectl delete secret hub-kubeconfig-secret -n fleet-system +kubectl create secret generic hub-kubeconfig-secret --from-file=kubeconfig=/Users/ryanzhang/.kube/member-a -n fleet-system +go run cmd/workcontroller/workcontroller.go --work-namespace=default --hub-kubeconfig-secret=hub-kubeconfig-secret -v 5 -add_dir_header + +``` +run the test in another window +``` +export KUBECONFIG=kubeconfig=/Users/ryanzhang/.kube/member-a +go test . 
-test.v -ginkgo.v +``` ### Deploy a Work on the Hub cluster On the `Hub` cluster terminal, run the following command: diff --git a/pkg/apis/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/v1alpha1/zz_generated.deepcopy.go index 515039b5..56e2dff3 100644 --- a/pkg/apis/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/controllers/apply_controller.go b/pkg/controllers/apply_controller.go index 6acfaf69..7650ec62 100644 --- a/pkg/controllers/apply_controller.go +++ b/pkg/controllers/apply_controller.go @@ -21,7 +21,6 @@ import ( "crypto/sha256" "encoding/json" "fmt" - "sigs.k8s.io/work-api/pkg/utils" "time" v1 "k8s.io/api/core/v1" @@ -44,6 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" + "sigs.k8s.io/work-api/pkg/utils" ) const ( @@ -129,8 +129,8 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // update the work status if err = r.client.Status().Update(ctx, work, &client.UpdateOptions{}); err != nil { - errs = append(errs, err) klog.ErrorS(err, "failed to update work status", "work", kLogObjRef) + return ctrl.Result{}, err } if len(errs) != 0 { @@ -273,7 +273,7 @@ func (r *ApplyWorkReconciler) applyUnstructured(ctx context.Context, gvr schema. if apierrors.IsNotFound(err) { actual, createErr := r.spokeDynamicClient.Resource(gvr).Namespace(manifestObj.GetNamespace()).Create( ctx, manifestObj, metav1.CreateOptions{FieldManager: workFieldManagerName}) - if createErr == nil || apierrors.IsAlreadyExists(createErr) { + if createErr == nil { klog.V(4).InfoS("successfully created the manfiest", "gvr", gvr, "manfiest", manifestRef) return actual, true, nil } @@ -311,6 +311,9 @@ func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr sche klog.V(5).InfoS("manifest is modified", "gvr", gvr, "manifest", manifestRef, "new hash", manifestObj.GetAnnotations()[manifestHashAnnotation], "existing hash", curObj.GetAnnotations()[manifestHashAnnotation]) + // merge owner refes since the patch does just replace + manifestObj.SetOwnerReferences(utils.MergeOwnerReference(curObj.GetOwnerReferences(), manifestObj.GetOwnerReferences())) + newData, err := manifestObj.MarshalJSON() if err != nil { klog.ErrorS(err, "failed to JSON marshall manifest", "gvr", gvr, "manifest", manifestRef) @@ -370,18 +373,6 @@ func (r *ApplyWorkReconciler) SetupWithManager(mgr ctrl.Manager) error { // we have modified. 
func computeManifestHash(obj *unstructured.Unstructured) (string, error) { manifest := obj.DeepCopy() - // strip all the appliedWork owner fields just in case - // those fields should not exist in the manifest - curRefs := manifest.GetOwnerReferences() - var newRefs []metav1.OwnerReference - for _, ownerRef := range curRefs { - if ownerRef.APIVersion == workv1alpha1.GroupVersion.String() && ownerRef.Kind == workv1alpha1.AppliedWorkKind { - // we skip the appliedWork owner - continue - } - newRefs = append(newRefs, ownerRef) - } - manifest.SetOwnerReferences(newRefs) // remove the manifestHash Annotation just in case annotation := manifest.GetAnnotations() if annotation != nil { @@ -398,7 +389,7 @@ func computeManifestHash(obj *unstructured.Unstructured) (string, error) { unstructured.RemoveNestedField(manifest.Object, "metadata", "creationTimestamp") unstructured.RemoveNestedField(manifest.Object, "status") // compute the sha256 hash of the remaining data - jsonBytes, err := json.Marshal(manifest) + jsonBytes, err := json.Marshal(manifest.Object) if err != nil { return "", err } diff --git a/pkg/controllers/apply_controller_integratoin_test.go b/pkg/controllers/apply_controller_integratoin_test.go index 87d47803..6cac6b97 100644 --- a/pkg/controllers/apply_controller_integratoin_test.go +++ b/pkg/controllers/apply_controller_integratoin_test.go @@ -19,7 +19,6 @@ package controllers import ( "context" "fmt" - "k8s.io/apimachinery/pkg/types" "time" . "github.com/onsi/ginkgo" @@ -29,7 +28,9 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" + workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" ) diff --git a/pkg/controllers/apply_controller_test.go b/pkg/controllers/apply_controller_test.go index a7c90912..c79e3cb3 100644 --- a/pkg/controllers/apply_controller_test.go +++ b/pkg/controllers/apply_controller_test.go @@ -54,7 +54,7 @@ var ( appliedWork = &workv1alpha1.AppliedWork{} ownerRef = metav1.OwnerReference{ APIVersion: workv1alpha1.GroupVersion.String(), - Kind: appliedWork.Kind, + Kind: "AppliedWork", } testGvr = schema.GroupVersionResource{ Group: "apps", @@ -588,10 +588,13 @@ func TestReconcile(t *testing.T) { req: req, wantErr: nil, }, - "Retrieving appliedwork fails": { + "Retrieving appliedwork fails, will create": { reconciler: ApplyWorkReconciler{ client: &test.MockClient{ MockGet: getMock, + MockStatusUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return nil + }, }, spokeDynamicClient: fakeDynamicClient, spokeClient: &test.MockClient{ @@ -602,13 +605,16 @@ func TestReconcile(t *testing.T) { Reason: metav1.StatusReasonNotFound, }} }, + MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + return nil + }, }, restMapper: testMapper{}, recorder: utils.NewFakeRecorder(1), joined: true, }, req: req, - wantErr: errors.New("manifest apply unwarranted; the spec has not changed"), + wantErr: nil, }, "ApplyManifest fails": { reconciler: ApplyWorkReconciler{ @@ -679,7 +685,11 @@ func TestReconcile(t *testing.T) { assert.Containsf(t, err.Error(), testCase.wantErr.Error(), "incorrect error for Testcase %s", testName) } else { if testCase.requeue { - assert.Equal(t, ctrl.Result{RequeueAfter: time.Minute * 5}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) + if testCase.reconciler.joined { + assert.Equal(t, ctrl.Result{RequeueAfter: 
time.Minute * 5}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) + } else { + assert.Equal(t, ctrl.Result{RequeueAfter: time.Second * 5}, ctrlResult, "incorrect ctrlResult for Testcase %s", testName) + } } assert.Equalf(t, false, ctrlResult.Requeue, "incorrect ctrlResult for Testcase %s", testName) } @@ -719,7 +729,7 @@ func TestSetManifestHashAnnotation(t *testing.T) { manifestObj interface{} isSame bool }{ - "manifest status changed": { + "manifest status changed, same": { manifestObj: func() *appsv1.Deployment { extraObj := manifestObj.DeepCopy() extraObj.Status.ReadyReplicas = 10 @@ -727,7 +737,7 @@ func TestSetManifestHashAnnotation(t *testing.T) { }(), isSame: true, }, - "manifest's has hashAnnotation": { + "manifest's has hashAnnotation, same": { manifestObj: func() *appsv1.Deployment { alterObj := manifestObj.DeepCopy() alterObj.Annotations[manifestHashAnnotation] = utilrand.String(10) @@ -735,7 +745,7 @@ func TestSetManifestHashAnnotation(t *testing.T) { }(), isSame: true, }, - "manifest has extra metadata": { + "manifest has extra metadata, same": { manifestObj: func() *appsv1.Deployment { noObj := manifestObj.DeepCopy() noObj.SetSelfLink(utilrand.String(2)) @@ -747,7 +757,16 @@ func TestSetManifestHashAnnotation(t *testing.T) { }(), isSame: true, }, - "manifest is has changed ownership": { + "manifest has a new appliedWork ownership, need update": { + manifestObj: func() *appsv1.Deployment { + alterObj := manifestObj.DeepCopy() + alterObj.OwnerReferences[0].APIVersion = workv1alpha1.GroupVersion.String() + alterObj.OwnerReferences[0].Kind = workv1alpha1.AppliedWorkKind + return alterObj + }(), + isSame: false, + }, + "manifest is has changed ownership, need update": { manifestObj: func() *appsv1.Deployment { alterObj := manifestObj.DeepCopy() alterObj.OwnerReferences[0].APIVersion = utilrand.String(10) @@ -755,7 +774,7 @@ func TestSetManifestHashAnnotation(t *testing.T) { }(), isSame: false, }, - "manifest has a different label": { + "manifest has a different label, need update": { manifestObj: func() *appsv1.Deployment { alterObj := manifestObj.DeepCopy() alterObj.SetLabels(map[string]string{utilrand.String(5): utilrand.String(10)}) @@ -763,7 +782,7 @@ func TestSetManifestHashAnnotation(t *testing.T) { }(), isSame: false, }, - "manifest has a different annotation": { + "manifest has a different annotation, need update": { manifestObj: func() *appsv1.Deployment { alterObj := manifestObj.DeepCopy() alterObj.SetAnnotations(map[string]string{utilrand.String(5): utilrand.String(10)}) @@ -771,7 +790,7 @@ func TestSetManifestHashAnnotation(t *testing.T) { }(), isSame: false, }, - "manifest has a different spec": { + "manifest has a different spec, need update": { manifestObj: func() *appsv1.Deployment { alterObj := manifestObj.DeepCopy() alterObj.Spec.Replicas = pointer.Int32Ptr(100) @@ -856,10 +875,10 @@ func createObjAndDynamicClient(rawManifest []byte) (unstructured.Unstructured, d unstructuredObj.SetAnnotations(map[string]string{manifestHashAnnotation: validSpecHash}) dynamicClient := fake.NewSimpleDynamicClient(runtime.NewScheme()) dynamicClient.PrependReactor("get", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, unstructuredObj, nil + return true, unstructuredObj.DeepCopy(), nil }) dynamicClient.PrependReactor("patch", "*", func(action testingclient.Action) (handled bool, ret runtime.Object, err error) { - return true, unstructuredObj, nil + return true, unstructuredObj.DeepCopy(), nil }) return 
*unstructuredObj, dynamicClient, validSpecHash } diff --git a/pkg/controllers/suite_test.go b/pkg/controllers/suite_test.go index 41e90177..c1339d35 100644 --- a/pkg/controllers/suite_test.go +++ b/pkg/controllers/suite_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" ) diff --git a/pkg/controllers/work_status_controller.go b/pkg/controllers/work_status_controller.go index c6e258f2..76e3f2e2 100644 --- a/pkg/controllers/work_status_controller.go +++ b/pkg/controllers/work_status_controller.go @@ -19,6 +19,8 @@ package controllers import ( "context" "fmt" + "time" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,8 +36,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" + workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - "time" ) // TODO: merge this with the apply controller diff --git a/pkg/utils/common.go b/pkg/utils/common.go index e162f194..a45709e4 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -32,6 +32,18 @@ func AddOwnerRef(ref metav1.OwnerReference, object metav1.Object) { object.SetOwnerReferences(owners) } +// MergeOwnerReference merges two owner reference arrays. +func MergeOwnerReference(owners, newOwners []metav1.OwnerReference) []metav1.OwnerReference { + for _, newOwner := range newOwners { + if idx := indexOwnerRef(owners, newOwner); idx == -1 { + owners = append(owners, newOwner) + } else { + owners[idx] = newOwner + } + } + return owners +} + // indexOwnerRef returns the index of the owner reference in the slice if found, or -1. func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { for index, r := range ownerReferences { diff --git a/tests/e2e/apply_test.go b/tests/e2e/apply_test.go index 09c0903a..640964fe 100644 --- a/tests/e2e/apply_test.go +++ b/tests/e2e/apply_test.go @@ -2,6 +2,8 @@ package e2e import ( "context" + appv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -16,6 +18,7 @@ import ( utilrand "k8s.io/apimachinery/pkg/util/rand" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" + "sigs.k8s.io/work-api/pkg/controllers" ) type manifestDetails struct { @@ -103,6 +106,8 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { ) err = createWork(workObj) + Expect(err).ToNot(HaveOccurred()) + createdWork, err = retrieveWork(workObj.Namespace, workObj.Name) Expect(err).ToNot(HaveOccurred()) }) @@ -121,10 +126,10 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { }, &appliedWork) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - + var deploy *appv1.Deployment By("verifying a deployment was created") Eventually(func() error { - _, err := spokeKubeClient.AppsV1().Deployments(mDetails[0].ObjMeta.Namespace). + deploy, err = spokeKubeClient.AppsV1().Deployments(mDetails[0].ObjMeta.Namespace). 
Get(context.Background(), mDetails[0].ObjMeta.Name, metav1.GetOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -142,10 +147,24 @@ var WorkCreatedContext = func(description string, manifestFiles []string) bool { if err != nil { return false } - appliedCondition := meta.IsStatusConditionTrue(work.Status.Conditions, "Applied") - availableCondition := meta.IsStatusConditionTrue(work.Status.Conditions, "Available") - return appliedCondition && availableCondition + return meta.IsStatusConditionTrue(work.Status.Conditions, controllers.ConditionTypeApplied) }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) + + deployVersion := deploy.GetGeneration() + + By("verifying that corresponding conditions were created") + createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name) + Expect(err).Should(Succeed()) + createdWork, err = updateWork(createdWork) + Expect(err).Should(Succeed()) + + By("verifying a deployment was not modified") + Eventually(func() error { + deploy, err = spokeKubeClient.AppsV1().Deployments(mDetails[0].ObjMeta.Namespace). + Get(context.Background(), mDetails[0].ObjMeta.Name, metav1.GetOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) + Expect(deployVersion).Should(Equal(deploy.GetGeneration())) }) }) } @@ -199,8 +218,8 @@ var WorkUpdateWithDependencyContext = func(description string, initialManifestFi defaultWorkNamespace, initialManifestDetails, ) - err = createWork(workObj) + Expect(err).ToNot(HaveOccurred()) createdWork, err = retrieveWork(workObj.Namespace, workObj.Name) Expect(err).ToNot(HaveOccurred()) }) @@ -213,7 +232,7 @@ var WorkUpdateWithDependencyContext = func(description string, initialManifestFi Expect(err).ToNot(HaveOccurred()) }) - It("should have created the ConfigMap in the new namespace", func() { + It("should have updated the ConfigMap in the new namespace", func() { By("retrieving the existing work and updating it by adding new manifests") Eventually(func() error { createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name) @@ -221,21 +240,18 @@ var WorkUpdateWithDependencyContext = func(description string, initialManifestFi createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) createdWork, err = updateWork(createdWork) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) By("checking if the ConfigMap was created in the new namespace") Eventually(func() error { _, err := spokeKubeClient.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Get(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.GetOptions{}) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) By("checking if the new Namespace was created ") Eventually(func() error { _, err := spokeKubeClient.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{}) - return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) }) @@ -261,8 +277,9 @@ var WorkUpdateWithModifiedManifestContext = func(description string, manifestFil defaultWorkNamespace, manifestDetails, ) - err = createWork(workObj) + Expect(err).ToNot(HaveOccurred()) + createdWork, err = retrieveWork(workObj.Namespace, workObj.Name) Expect(err).ToNot(HaveOccurred()) }) @@ -432,7 +449,6 @@ var MultipleWorkWithSameManifestContext = func(description string, manifestFiles return 
Context(description, func() { var workOne *workapi.Work var workTwo *workapi.Work - var err error var manifestDetailsOne []manifestDetails var manifestDetailsTwo []manifestDetails @@ -453,24 +469,14 @@ var MultipleWorkWithSameManifestContext = func(description string, manifestFiles }) - AfterEach(func() { - err = deleteWorkResource(workOne) - Expect(err).ToNot(HaveOccurred()) - - err = deleteWorkResource(workTwo) - Expect(err).ToNot(HaveOccurred()) - }) - It("should ignore the duplicate manifest", func() { + By("creating the work one resources") + Expect(createWork(workOne)).To(Succeed()) - By("creating the work resources") - err = createWork(workOne) - Expect(err).ToNot(HaveOccurred()) - - err = createWork(workTwo) - Expect(err).ToNot(HaveOccurred()) + By("creating the work two resources") + Expect(createWork(workTwo)).To(Succeed()) - By("Checking the Applied Work status of each to see if one of the manifest is abandoned.") + By("Checking the appliedWork status of each to see if it is .") Eventually(func() bool { appliedWorkOne, err := retrieveAppliedWork(workOne.Name) if err != nil { @@ -482,10 +488,10 @@ var MultipleWorkWithSameManifestContext = func(description string, manifestFiles return false } - return len(appliedWorkOne.Status.AppliedResources)+len(appliedWorkTwo.Status.AppliedResources) == 1 + return len(appliedWorkOne.Status.AppliedResources)+len(appliedWorkTwo.Status.AppliedResources) == 2 }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - By("Checking the work status of each works for verification") + By("Checking the work status of each works that both is applied") Eventually(func() bool { workOne, err := retrieveWork(workOne.Namespace, workOne.Name) if err != nil { @@ -497,18 +503,34 @@ var MultipleWorkWithSameManifestContext = func(description string, manifestFiles } workOneCondition := meta.IsStatusConditionTrue(workOne.Status.ManifestConditions[0].Conditions, "Applied") workTwoCondition := meta.IsStatusConditionTrue(workTwo.Status.ManifestConditions[0].Conditions, "Applied") - return (workOneCondition || workTwoCondition) && !(workOneCondition && workTwoCondition) + return workOneCondition && workTwoCondition }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - By("verifying the resource was garbage collected") - Eventually(func() error { + By("verifying there is only one real resource on the spoke") + deploy, err := spokeKubeClient.AppsV1().Deployments(manifestDetailsOne[0].ObjMeta.Namespace). + Get(context.Background(), manifestDetailsOne[0].ObjMeta.Name, metav1.GetOptions{}) + Expect(err).Should(Succeed()) + Expect(len(deploy.OwnerReferences)).Should(Equal(2)) - return spokeKubeClient.AppsV1().Deployments(manifestDetailsOne[0].ObjMeta.Namespace).Delete(context.Background(), manifestDetailsOne[0].ObjMeta.Name, metav1.DeleteOptions{}) - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - Eventually(func() error { + By("delete the work two resources") + Expect(deleteWorkResource(workTwo)).To(Succeed()) - return spokeKubeClient.AppsV1().Deployments(manifestDetailsTwo[0].ObjMeta.Namespace).Delete(context.Background(), manifestDetailsTwo[0].ObjMeta.Name, metav1.DeleteOptions{}) - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) + By("Delete one work wont' delete the manifest") + Eventually(func() int { + deploy, err = spokeKubeClient.AppsV1().Deployments(manifestDetailsOne[0].ObjMeta.Namespace). 
+ Get(context.Background(), manifestDetailsOne[0].ObjMeta.Name, metav1.GetOptions{}) + Expect(err).Should(Succeed()) + return len(deploy.OwnerReferences) + }, eventuallyTimeout, eventuallyInterval).Should(Equal(1)) + + By("delete the work one resources") + err = deleteWorkResource(workOne) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + deploy, err = spokeKubeClient.AppsV1().Deployments(manifestDetailsOne[0].ObjMeta.Namespace). + Get(context.Background(), manifestDetailsOne[0].ObjMeta.Name, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) }) }) } @@ -581,19 +603,17 @@ func generateManifestDetails(manifestFiles []string) []manifestDetails { } func retrieveAppliedWork(appliedWorkName string) (*workapi.AppliedWork, error) { retrievedAppliedWork := workapi.AppliedWork{} - err := spokeClient.Get(context.Background(), types.NamespacedName{Name: appliedWorkName}, &retrievedAppliedWork) - if err != nil { - return &retrievedAppliedWork, err - } + Eventually(func() error { + return spokeClient.Get(context.Background(), types.NamespacedName{Name: appliedWorkName}, &retrievedAppliedWork) + }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) return &retrievedAppliedWork, nil } func retrieveWork(workNamespace string, workName string) (*workapi.Work, error) { workRetrieved := workapi.Work{} - err := hubClient.Get(context.Background(), types.NamespacedName{Namespace: workNamespace, Name: workName}, &workRetrieved) - if err != nil { - return nil, err - } + Eventually(func() error { + return hubClient.Get(context.Background(), types.NamespacedName{Namespace: workNamespace, Name: workName}, &workRetrieved) + }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) return &workRetrieved, nil } func updateWork(work *workapi.Work) (*workapi.Work, error) { From 6cd17f45affc8b42ee2bac3c5f1d7c7e749ef805 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Fri, 19 Aug 2022 23:33:51 -0700 Subject: [PATCH 6/7] fix lint --- go.mod | 9 ++++++--- go.sum | 13 +++++++++++++ pkg/apis/v1alpha1/zz_generated.deepcopy.go | 2 +- pkg/controllers/apply_controller_test.go | 1 - 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 1c9278ae..3722c294 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( ) require ( + github.com/BurntSushi/toml v0.4.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -55,14 +56,15 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect + golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect + golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect 
google.golang.org/appengine v1.6.7 // indirect @@ -71,6 +73,7 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + honnef.co/go/tools v0.3.3 // indirect k8s.io/component-base v0.23.0 // indirect k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect diff --git a/go.sum b/go.sum index 2de1bf51..da9974d8 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -606,7 +608,10 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= +golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -633,6 +638,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -683,6 +690,8 @@ golang.org/x/net 
v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -861,6 +870,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f h1:OKYpQQVE3DKSc3r3zHVzq46vq5YH7x8xpR3/k9ixmUg= +golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1026,6 +1037,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= +honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= diff --git a/pkg/apis/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/v1alpha1/zz_generated.deepcopy.go index 56e2dff3..515039b5 100644 --- a/pkg/apis/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ limitations under the License. 
package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) diff --git a/pkg/controllers/apply_controller_test.go b/pkg/controllers/apply_controller_test.go index c79e3cb3..ebd7b139 100644 --- a/pkg/controllers/apply_controller_test.go +++ b/pkg/controllers/apply_controller_test.go @@ -51,7 +51,6 @@ import ( var ( fakeDynamicClient = fake.NewSimpleDynamicClient(runtime.NewScheme()) - appliedWork = &workv1alpha1.AppliedWork{} ownerRef = metav1.OwnerReference{ APIVersion: workv1alpha1.GroupVersion.String(), Kind: "AppliedWork", From 2fa480598af8751719c5cff8a653488acbb0d351 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Sat, 20 Aug 2022 00:30:59 -0700 Subject: [PATCH 7/7] remove bad test --- .../apply_controller_integratoin_test.go | 1 - pkg/controllers/apply_controller_test.go | 43 +++++++------------ .../work_status_controller_test.go | 11 ----- 3 files changed, 15 insertions(+), 40 deletions(-) diff --git a/pkg/controllers/apply_controller_integratoin_test.go b/pkg/controllers/apply_controller_integratoin_test.go index 6cac6b97..d8beb957 100644 --- a/pkg/controllers/apply_controller_integratoin_test.go +++ b/pkg/controllers/apply_controller_integratoin_test.go @@ -116,7 +116,6 @@ var _ = Describe("Work Controller", func() { if !meta.IsStatusConditionTrue(resultWork.Status.Conditions, "Applied") { return fmt.Errorf("Exepect condition status of the work to be true") } - return nil }, timeout, interval).Should(Succeed()) }) diff --git a/pkg/controllers/apply_controller_test.go b/pkg/controllers/apply_controller_test.go index ebd7b139..536d3c50 100644 --- a/pkg/controllers/apply_controller_test.go +++ b/pkg/controllers/apply_controller_test.go @@ -218,7 +218,7 @@ func TestApplyManifest(t *testing.T) { } func TestApplyUnstructured(t *testing.T) { - correctObj, correctDynamicClient, correctSpecHash := createObjAndDynamicClient(testManifest.Raw) + correctObj, _, correctSpecHash := createObjAndDynamicClient(testManifest.Raw) testDeploymentDiffSpec := testDeployment.DeepCopy() testDeploymentDiffSpec.Spec.MinReadySeconds = 0 @@ -286,19 +286,6 @@ func TestApplyUnstructured(t *testing.T) { resultBool bool resultErr error }{ - "error during SpecHash Generation / fail": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: fakeDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - joined: true, - }, - workObj: specHashFailObj, - resultBool: false, - resultErr: errors.New("unsupported value"), - }, "not found error looking for object / success due to creation": { reconciler: ApplyWorkReconciler{ client: &test.MockClient{}, @@ -339,20 +326,6 @@ func TestApplyUnstructured(t *testing.T) { resultBool: false, resultErr: errors.New("resource is not managed by the work controller"), }, - "equal spec hash of current vs work object / succeed without updates": { - reconciler: ApplyWorkReconciler{ - client: &test.MockClient{}, - spokeDynamicClient: correctDynamicClient, - spokeClient: &test.MockClient{}, - restMapper: testMapper{}, - recorder: utils.NewFakeRecorder(1), - joined: true, - }, - workObj: correctObj.DeepCopy(), - resultSpecHash: correctSpecHash, - resultBool: false, - resultErr: nil, - }, "unequal spec hash of current vs work object / client patch fail": { reconciler: ApplyWorkReconciler{ spokeDynamicClient: patchFailClient, @@ -699,6 +672,10 @@ func TestReconcile(t *testing.T) { func TestSetManifestHashAnnotation(t 
*testing.T) { // basic setup manifestObj := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, ObjectMeta: metav1.ObjectMeta{ Name: "Deployment", OwnerReferences: []metav1.OwnerReference{ @@ -717,6 +694,9 @@ func TestSetManifestHashAnnotation(t *testing.T) { Type: appsv1.RecreateDeploymentStrategyType, }, }, + Status: appsv1.DeploymentStatus{ + ReadyReplicas: 1, + }, } // pre-compute the hash preObj := manifestObj.DeepCopy() @@ -728,6 +708,13 @@ func TestSetManifestHashAnnotation(t *testing.T) { manifestObj interface{} isSame bool }{ + "manifest same, same": { + manifestObj: func() *appsv1.Deployment { + extraObj := manifestObj.DeepCopy() + return extraObj + }(), + isSame: true, + }, "manifest status changed, same": { manifestObj: func() *appsv1.Deployment { extraObj := manifestObj.DeepCopy() diff --git a/pkg/controllers/work_status_controller_test.go b/pkg/controllers/work_status_controller_test.go index 6f9005a1..dc6ebcb7 100644 --- a/pkg/controllers/work_status_controller_test.go +++ b/pkg/controllers/work_status_controller_test.go @@ -79,17 +79,6 @@ func TestCalculateNewAppliedWork(t *testing.T) { }, }, }, - "Work resource contains the status of a resource that does not exist within the AppliedWork resource.": { - r: WorkStatusReconciler{joined: true}, - inputWork: inputWorkWithResourceIdentifier, - inputAppliedWork: inputAppliedWork, - expectedNewRes: []v1alpha1.AppliedResourceMeta{ - { - ResourceIdentifier: inputWorkWithResourceIdentifier.Status.ManifestConditions[0].Identifier, - }, - }, - expectedStaleRes: []v1alpha1.AppliedResourceMeta(nil), - }, } for testName, tt := range tests { t.Run(testName, func(t *testing.T) {