fix(server): Dry run always in client mode just for yaml manifest validation even with server side apply #564

Merged: 8 commits on Jan 22, 2024
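In short: during the dry-run phase of a sync, the apply is now always executed as a client-side dry run, and server-side apply is switched off, because the dry run only needs to validate the rendered YAML (a server-side dry run also breaks auto namespace creation, see argoproj/argo-cd#13874). A minimal sketch of the resulting rule, using illustrative helper names that are not part of this PR (cmdutil is k8s.io/kubectl/pkg/cmd/util):

package sync

import (
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

// dryRunStrategyFor sketches the new rule: a dry run never uses the
// server-side strategy, regardless of how the sync is configured.
func dryRunStrategyFor(dryRun bool) cmdutil.DryRunStrategy {
    if !dryRun {
        return cmdutil.DryRunNone
    }
    // Only YAML correctness is being validated, so client-side is enough,
    // and server-side dry run breaks namespace auto-creation.
    return cmdutil.DryRunClient
}

// useServerSideApply sketches the companion rule: server-side apply is skipped
// entirely while dry-running, even if enabled on the sync context or via annotation.
func useServerSideApply(dryRun, enabled bool) bool {
    return !dryRun && enabled
}

The PR inlines this logic directly in applyObject (see the first diff below) instead of keeping a separate helper, which is why getDryRunStrategy and its unit test are deleted.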
77 changes: 34 additions & 43 deletions pkg/sync/sync_context.go
@@ -903,57 +903,48 @@ func (sc *syncContext) ensureCRDReady(name string) error {
     })
 }
 
-func getDryRunStrategy(serverSideApply, dryRun bool) cmdutil.DryRunStrategy {
-    if !dryRun {
-        return cmdutil.DryRunNone
-    }
-    if serverSideApply {
-        return cmdutil.DryRunServer
-    }
-    return cmdutil.DryRunClient
-}
-
 func (sc *syncContext) applyObject(t *syncTask, dryRun, force, validate bool) (common.ResultCode, string) {
-    serverSideApply := sc.serverSideApply || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionServerSideApply)
-
-    dryRunStrategy := getDryRunStrategy(serverSideApply, dryRun)
+    dryRunStrategy := cmdutil.DryRunNone
+    if dryRun {
+        // irrespective of the dry run mode set in the sync context, always run
+        // in client dry run mode as the goal is to validate only the
+        // yaml correctness of the rendered manifests.
+        // running dry-run in server mode breaks the auto create namespace feature
+        // https://github.com/argoproj/argo-cd/issues/13874
+        dryRunStrategy = cmdutil.DryRunClient
+    }
 
     var err error
     var message string
     shouldReplace := sc.replace || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionReplace)
-    applyFn := func(dryRunStrategy cmdutil.DryRunStrategy) (string, error) {
-        if !shouldReplace {
-            return sc.resourceOps.ApplyResource(context.TODO(), t.targetObj, dryRunStrategy, force, validate, serverSideApply, sc.serverSideApplyManager, false)
-        }
-        if t.liveObj == nil {
-            return sc.resourceOps.CreateResource(context.TODO(), t.targetObj, dryRunStrategy, validate)
-        }
-        // Avoid using `kubectl replace` for CRDs since 'replace' might recreate resource and so delete all CRD instances.
-        // The same thing applies for namespaces, which would delete the namespace as well as everything within it,
-        // so we want to avoid using `kubectl replace` in that case as well.
-        if kube.IsCRD(t.targetObj) || t.targetObj.GetKind() == kubeutil.NamespaceKind {
-            update := t.targetObj.DeepCopy()
-            update.SetResourceVersion(t.liveObj.GetResourceVersion())
-            _, err = sc.resourceOps.UpdateResource(context.TODO(), update, dryRunStrategy)
-            if err != nil {
-                return fmt.Sprintf("error when updating: %v", err.Error()), err
+    // if it is a dry run, disable server side apply, as the goal is to validate only the
+    // yaml correctness of the rendered manifests.
+    // running dry-run in server mode breaks the auto create namespace feature
+    // https://github.com/argoproj/argo-cd/issues/13874
+    serverSideApply := !dryRun && (sc.serverSideApply || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionServerSideApply))
+    if shouldReplace {
+        if t.liveObj != nil {
+            // Avoid using `kubectl replace` for CRDs since 'replace' might recreate resource and so delete all CRD instances.
+            // The same thing applies for namespaces, which would delete the namespace as well as everything within it,
+            // so we want to avoid using `kubectl replace` in that case as well.
+            if kube.IsCRD(t.targetObj) || t.targetObj.GetKind() == kubeutil.NamespaceKind {
+                update := t.targetObj.DeepCopy()
+                update.SetResourceVersion(t.liveObj.GetResourceVersion())
+                _, err = sc.resourceOps.UpdateResource(context.TODO(), update, dryRunStrategy)
+                if err == nil {
+                    message = fmt.Sprintf("%s/%s updated", t.targetObj.GetKind(), t.targetObj.GetName())
+                } else {
+                    message = fmt.Sprintf("error when updating: %v", err.Error())
+                }
+            } else {
+                message, err = sc.resourceOps.ReplaceResource(context.TODO(), t.targetObj, dryRunStrategy, force)
             }
-            return fmt.Sprintf("%s/%s updated", t.targetObj.GetKind(), t.targetObj.GetName()), nil
-
+        } else {
+            message, err = sc.resourceOps.CreateResource(context.TODO(), t.targetObj, dryRunStrategy, validate)
         }
-        return sc.resourceOps.ReplaceResource(context.TODO(), t.targetObj, dryRunStrategy, force)
-    }
-
-    message, err = applyFn(dryRunStrategy)
-
-    // DryRunServer fails with "Kind does not support fieldValidation" error for kubernetes server < 1.25
-    // it fails inside apply.go , o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind) line
-    // so we retry with DryRunClient that works for all cases, but cause issues with hooks
-    // Details: https://github.com/argoproj/argo-cd/issues/16177
-    if dryRunStrategy == cmdutil.DryRunServer && err != nil {
-        message, err = applyFn(cmdutil.DryRunClient)
+    } else {
+        message, err = sc.resourceOps.ApplyResource(context.TODO(), t.targetObj, dryRunStrategy, force, validate, serverSideApply, sc.serverSideApplyManager, false)
     }
 
     if err != nil {
         return common.ResultCodeSyncFailed, err.Error()
     }
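One consequence spelled out: the applyFn closure and its fallback existed only to retry a failed server-side dry run as a client-side one ("Kind does not support fieldValidation" on clusters older than 1.25, argoproj/argo-cd#16177). With the strategy now pinned to DryRunNone or DryRunClient, that guard can never fire, which is why the wrapper is deleted rather than kept:

// Removed guard from the old code above: it was only reachable when the
// dry-run strategy was DryRunServer, which the new code never selects.
if dryRunStrategy == cmdutil.DryRunServer && err != nil {
    message, err = applyFn(cmdutil.DryRunClient)
}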
20 changes: 0 additions & 20 deletions pkg/sync/sync_context_test.go
@@ -5,7 +5,6 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    "k8s.io/kubectl/pkg/cmd/util"
     "net/http"
     "net/http/httptest"
     "reflect"
@@ -540,25 +539,6 @@ func (s *APIServerMock) newHttpServer(t *testing.T, apiFailuresCount int) *httpt
     return server
 }
 
-func TestGetDryRunStrategy(t *testing.T) {
-    t.Run("no dry run", func(t *testing.T) {
-        strategy := getDryRunStrategy(false, false)
-        assert.Equal(t, util.DryRunNone, strategy)
-    })
-    t.Run("no dry run with server side apply", func(t *testing.T) {
-        strategy := getDryRunStrategy(true, false)
-        assert.Equal(t, util.DryRunNone, strategy)
-    })
-    t.Run("dry run with server side apply", func(t *testing.T) {
-        strategy := getDryRunStrategy(true, true)
-        assert.Equal(t, util.DryRunServer, strategy)
-    })
-    t.Run("dry run with client side apply", func(t *testing.T) {
-        strategy := getDryRunStrategy(false, true)
-        assert.Equal(t, util.DryRunClient, strategy)
-    })
-}
-
 func TestServerResourcesRetry(t *testing.T) {
     type fixture struct {
         apiServerMock *APIServerMock
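TestGetDryRunStrategy is dropped together with the helper it exercised. If equivalent coverage were wanted for the simplified rule, a test against the illustrative helpers sketched earlier (not this repository's API) might look like this, assuming testify's assert and the cmdutil alias from that sketch:

func TestDryRunStrategyAlwaysClient(t *testing.T) {
    // Without dry run there is no dry-run strategy at all.
    assert.Equal(t, cmdutil.DryRunNone, dryRunStrategyFor(false))
    // With dry run the strategy is always client-side ...
    assert.Equal(t, cmdutil.DryRunClient, dryRunStrategyFor(true))
    // ... and server-side apply is skipped even when it is enabled.
    assert.False(t, useServerSideApply(true, true))
}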
7 changes: 6 additions & 1 deletion pkg/utils/kube/resource_ops.go
@@ -244,7 +244,12 @@ func (k *kubectlResourceOperations) ApplyResource(ctx context.Context, obj *unst
     span.SetBaggageItem("kind", obj.GetKind())
     span.SetBaggageItem("name", obj.GetName())
     defer span.Finish()
-    k.log.Info(fmt.Sprintf("Applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace()))
+    k.log.WithValues(
+        "dry-run", [...]string{"none", "client", "server"}[dryRunStrategy],
+        "manager", manager,
+        "serverSideApply", serverSideApply,
+        "serverSideDiff", serverSideDiff).Info(fmt.Sprintf("Applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace()))
+
     return k.runResourceCommand(ctx, obj, dryRunStrategy, serverSideDiff, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error {
         cleanup, err := k.processKubectlRun("apply")
         if err != nil {
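A note on the new log line: it turns the strategy into a label by indexing an array literal, which works because cmdutil.DryRunNone, DryRunClient and DryRunServer are declared as consecutive iota values (0, 1, 2). A more explicit, hypothetical equivalent that does not depend on those numeric values:

// dryRunLabel is a hypothetical alternative to the [...]string{...}[dryRunStrategy]
// lookup in the diff above; it is not part of the PR.
func dryRunLabel(s cmdutil.DryRunStrategy) string {
    switch s {
    case cmdutil.DryRunClient:
        return "client"
    case cmdutil.DryRunServer:
        return "server"
    default:
        return "none"
    }
}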