diff --git a/api/.svace-dir/shared/snapshots/gitrepo b/api/.svace-dir/shared/snapshots/gitrepo new file mode 160000 index 000000000..135814039 --- /dev/null +++ b/api/.svace-dir/shared/snapshots/gitrepo @@ -0,0 +1 @@ +Subproject commit 1358140392aee1b96023b41cfed98476848fb1b3 diff --git a/api/go.mod b/api/go.mod index 44a7283ce..fb4901c76 100644 --- a/api/go.mod +++ b/api/go.mod @@ -7,7 +7,7 @@ require k8s.io/apimachinery v0.33.0 require ( github.com/fatih/color v1.18.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect diff --git a/api/go.sum b/api/go.sum index fcf6c4573..7bfaeb2e9 100644 --- a/api/go.sum +++ b/api/go.sum @@ -9,8 +9,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= diff --git a/hooks/go/go.mod b/hooks/go/go.mod index c63f80844..4573a9457 100644 --- a/hooks/go/go.mod +++ b/hooks/go/go.mod @@ -21,7 +21,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect diff --git a/hooks/go/go.sum b/hooks/go/go.sum index 80b1a0377..d4aead18c 100644 --- a/hooks/go/go.sum +++ b/hooks/go/go.sum @@ -37,8 +37,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= diff --git a/images/agent/cmd/llvs_ee.go 
b/images/agent/cmd/llvs_ee.go index 9d79450d6..e7bc3be7a 100644 --- a/images/agent/cmd/llvs_ee.go +++ b/images/agent/cmd/llvs_ee.go @@ -30,12 +30,13 @@ func addLLVSReconciler( commands utils.Commands, cfgParams *config.Config, ) { + log = log.WithName("addLLVSReconciler") if !feature.SnapshotsEnabled() { - log.Info("[addLLVSReconciler] Snapshot feature is disabled") + log.Info("Snapshot feature is disabled") return } - log.Info("[addLLVSReconciler] Snapshot feature is enabled. Adding LLVS reconciler") + log.Info("Snapshot feature is enabled. Adding LLVS reconciler") err := controller.AddReconciler( mgr, @@ -55,7 +56,7 @@ func addLLVSReconciler( ), ) if err != nil { - log.Error(err, "[main] unable to start llvs.NewReconciler") + log.Error(err, "unable to start llvs.NewReconciler") os.Exit(1) } } diff --git a/images/agent/cmd/main.go b/images/agent/cmd/main.go index f72e34c1a..c47e20176 100644 --- a/images/agent/cmd/main.go +++ b/images/agent/cmd/main.go @@ -72,35 +72,37 @@ func main() { os.Exit(1) } - log.Info(fmt.Sprintf("[main] Go Version:%s ", goruntime.Version())) - log.Info(fmt.Sprintf("[main] OS/Arch:Go OS/Arch:%s/%s ", goruntime.GOOS, goruntime.GOARCH)) + mainLog := log.WithName("main") - log.Info(fmt.Sprintf("[main] Feature SnapshotsEnabled: %t", feature.SnapshotsEnabled())) - log.Info(fmt.Sprintf("[main] Feature VolumeCleanupEnabled: %t", feature.VolumeCleanupEnabled())) + mainLog.Info(fmt.Sprintf("Go Version:%s ", goruntime.Version())) + mainLog.Info(fmt.Sprintf("OS/Arch:Go OS/Arch:%s/%s ", goruntime.GOOS, goruntime.GOARCH)) - log.Info("[main] CfgParams has been successfully created") - log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevel, cfgParams.Loglevel)) - log.Info(fmt.Sprintf("[main] %s = %s", config.NodeName, cfgParams.NodeName)) - log.Info(fmt.Sprintf("[main] %s = %s", config.MachineID, cfgParams.MachineID)) - log.Info(fmt.Sprintf("[main] %s = %s", config.ScanInterval, cfgParams.BlockDeviceScanInterval.String())) - log.Info(fmt.Sprintf("[main] %s = %s", config.ThrottleInterval, cfgParams.ThrottleInterval.String())) - log.Info(fmt.Sprintf("[main] %s = %s", config.CmdDeadlineDuration, cfgParams.CmdDeadlineDuration.String())) + mainLog.Info(fmt.Sprintf("Feature SnapshotsEnabled: %t", feature.SnapshotsEnabled())) + mainLog.Info(fmt.Sprintf("Feature VolumeCleanupEnabled: %t", feature.VolumeCleanupEnabled())) + + mainLog.Info("CfgParams has been successfully created") + mainLog.Info(fmt.Sprintf("%s = %s", config.LogLevel, cfgParams.Loglevel)) + mainLog.Info(fmt.Sprintf("%s = %s", config.NodeName, cfgParams.NodeName)) + mainLog.Info(fmt.Sprintf("%s = %s", config.MachineID, cfgParams.MachineID)) + mainLog.Info(fmt.Sprintf("%s = %s", config.ScanInterval, cfgParams.BlockDeviceScanInterval.String())) + mainLog.Info(fmt.Sprintf("%s = %s", config.ThrottleInterval, cfgParams.ThrottleInterval.String())) + mainLog.Info(fmt.Sprintf("%s = %s", config.CmdDeadlineDuration, cfgParams.CmdDeadlineDuration.String())) kConfig, err := kubutils.KubernetesDefaultConfigCreate() if err != nil { - log.Error(err, "[main] unable to KubernetesDefaultConfigCreate") + mainLog.Error(err, "unable to KubernetesDefaultConfigCreate") } - log.Info("[main] kubernetes config has been successfully created.") + mainLog.Info("kubernetes config has been successfully created.") scheme := runtime.NewScheme() for _, f := range resourcesSchemeFuncs { err := f(scheme) if err != nil { - log.Error(err, "[main] unable to add scheme to func") + mainLog.Error(err, "unable to add scheme to func") os.Exit(1) } } - 
log.Info("[main] successfully read scheme CR") + mainLog.Info("successfully read scheme CR") managerOpts := manager.Options{ Scheme: scheme, @@ -111,17 +113,17 @@ func main() { mgr, err := manager.New(kConfig, managerOpts) if err != nil { - log.Error(err, "[main] unable to manager.New") + mainLog.Error(err, "unable to manager.New") os.Exit(1) } - log.Info("[main] successfully created kubernetes manager") + mainLog.Info("successfully created kubernetes manager") metrics := monitoring.GetMetrics(cfgParams.NodeName) commands := utils.NewCommands() - log.Info("[main] ReTag starts") + mainLog.Info("ReTag starts") if err := commands.ReTag(ctx, log, metrics, bd.DiscovererName); err != nil { - log.Error(err, "[main] unable to run ReTag") + mainLog.Error(err, "unable to run ReTag") } sdsCache := cache.New() @@ -142,7 +144,7 @@ func main() { ), ) if err != nil { - log.Error(err, "[main] unable to controller.RunBlockDeviceController") + mainLog.Error(err, "unable to controller.RunBlockDeviceController") os.Exit(1) } @@ -162,7 +164,7 @@ func main() { ), ) if err != nil { - log.Error(err, "[main] unable to controller.RunLVMVolumeGroupDiscoverController") + mainLog.Error(err, "unable to controller.RunLVMVolumeGroupDiscoverController") os.Exit(1) } @@ -181,7 +183,7 @@ func main() { ), ) if err != nil { - log.Error(err, "[main] unable to run BlockDeviceFilter controller") + mainLog.Error(err, "unable to run BlockDeviceFilter controller") os.Exit(1) } @@ -202,7 +204,7 @@ func main() { ), ) if err != nil { - log.Error(err, "[main] unable to controller.RunLVMVolumeGroupWatcherController") + mainLog.Error(err, "unable to controller.RunLVMVolumeGroupWatcherController") os.Exit(1) } @@ -215,7 +217,7 @@ func main() { rediscoverBlockDevices, rediscoverLVGs, ); err != nil { - log.Error(err, "[main] unable to run scanner") + mainLog.Error(err, "unable to run scanner") os.Exit(1) } }() @@ -238,7 +240,7 @@ func main() { ), ) if err != nil { - log.Error(err, "[main] unable to controller.RunLVMVolumeGroupWatcherController") + mainLog.Error(err, "unable to controller.RunLVMVolumeGroupWatcherController") os.Exit(1) } @@ -258,29 +260,29 @@ func main() { ), ) if err != nil { - log.Error(err, "[main] unable to controller.RunLVMLogicalVolumeExtenderWatcherController") + mainLog.Error(err, "unable to controller.RunLVMLogicalVolumeExtenderWatcherController") os.Exit(1) } addLLVSReconciler(mgr, log, metrics, sdsCache, commands, cfgParams) if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - log.Error(err, "[main] unable to mgr.AddHealthzCheck") + mainLog.Error(err, "unable to mgr.AddHealthzCheck") os.Exit(1) } - log.Info("[main] successfully AddHealthzCheck") + mainLog.Info("successfully AddHealthzCheck") if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - log.Error(err, "[main] unable to mgr.AddReadyzCheck") + mainLog.Error(err, "unable to mgr.AddReadyzCheck") os.Exit(1) } - log.Info("[main] successfully AddReadyzCheck") + mainLog.Info("successfully AddReadyzCheck") err = mgr.Start(ctx) if err != nil { - log.Error(err, "[main] unable to mgr.Start") + mainLog.Error(err, "unable to mgr.Start") os.Exit(1) } - log.Info("[main] successfully starts the manager") + mainLog.Info("successfully starts the manager") } diff --git a/images/agent/go.mod b/images/agent/go.mod index e4d4d1818..7e5731bd1 100644 --- a/images/agent/go.mod +++ b/images/agent/go.mod @@ -5,7 +5,7 @@ go 1.24.9 require ( github.com/deckhouse/sds-node-configurator/api v0.0.0-20250130211935-b68366dfd0f8 
github.com/deckhouse/sds-node-configurator/lib/go/common v0.0.0-00010101000000-000000000000 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/google/go-cmp v0.7.0 github.com/gosimple/slug v1.14.0 github.com/onsi/ginkgo/v2 v2.21.0 diff --git a/images/agent/go.sum b/images/agent/go.sum index 829daafbf..a0f7906a4 100644 --- a/images/agent/go.sum +++ b/images/agent/go.sum @@ -16,8 +16,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= diff --git a/images/agent/internal/controller/bd/discoverer.go b/images/agent/internal/controller/bd/discoverer.go index 714a9fd42..a291fc9d8 100644 --- a/images/agent/internal/controller/bd/discoverer.go +++ b/images/agent/internal/controller/bd/discoverer.go @@ -91,35 +91,37 @@ func (d *Discoverer) Name() string { } func (d *Discoverer) Discover(ctx context.Context) (controller.Result, error) { - d.log.Info("[RunBlockDeviceController] Reconciler starts BlockDevice resources reconciliation") + log := d.log.WithName("Discover") + log.Info("Reconciler starts BlockDevice resources reconciliation") shouldRequeue, err := d.blockDeviceReconcile(ctx) if err != nil { - d.log.Error(err, "reconciling block devices") + log.Error(err, "reconciling block devices") } if shouldRequeue { - d.log.Warning(fmt.Sprintf("[RunBlockDeviceController] Reconciler needs a retry in %f", d.cfg.BlockDeviceScanInterval.Seconds())) + log.Warning("Reconciler needs a retry", "requeueAfter", d.cfg.BlockDeviceScanInterval.Seconds()) return controller.Result{RequeueAfter: d.cfg.BlockDeviceScanInterval}, nil } - d.log.Info("[RunBlockDeviceController] Reconciler successfully ended BlockDevice resources reconciliation") + log.Info("Reconciler successfully ended BlockDevice resources reconciliation") return controller.Result{}, err } func (d *Discoverer) blockDeviceReconcile(ctx context.Context) (bool, error) { + log := d.log.WithName("blockDeviceReconcile") reconcileStart := time.Now() - d.log.Info("[RunBlockDeviceController] START reconcile of block devices") + log.Info("START reconcile of block devices") candidates, err := d.getBlockDeviceCandidates() if err != nil { - d.log.Error(err, "[RunBlockDeviceController] unable to get block device candidates") + log.Error(err, "unable to get block device candidates") return true, fmt.Errorf("getting block device candidates: %w", err) } - d.log.Debug("[RunBlockDeviceController] Getting block device filters") + log.Debug("Getting block device filters") selector, err := d.blockDeviceFilterClient.GetAPIBlockDeviceFilters(ctx, DiscovererName) if err != nil { - d.log.Error(err, "[RunBlockDeviceController] unable to GetAPIBlockDeviceFilters") + log.Error(err, 
"unable to GetAPIBlockDeviceFilters") return true, fmt.Errorf("getting BlockDeviceFilters from API: %w", err) } deviceMatchesSelector := func(blockDevice *v1alpha1.BlockDevice) bool { @@ -128,75 +130,80 @@ func (d *Discoverer) blockDeviceReconcile(ctx context.Context) (bool, error) { apiBlockDevices, err := d.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) if err != nil { - d.log.Error(err, "[RunBlockDeviceController] unable to GetAPIBlockDevices") + log.Error(err, "unable to GetAPIBlockDevices") return true, fmt.Errorf("getting BlockDevices from API: %w", err) } if len(apiBlockDevices) == 0 { - d.log.Debug("[RunBlockDeviceController] no BlockDevice resources were found") + log.Debug("no BlockDevice resources were found") } blockDevicesToDelete := make([]*v1alpha1.BlockDevice, 0, len(candidates)) // create new API devices for _, candidate := range candidates { + log := log.WithValues("candidate", candidate) blockDevice, exist := apiBlockDevices[candidate.Name] if exist { + log := log.WithValues("blockDevice", blockDevice) addToDeleteListIfNotMatched := func(blockDevice v1alpha1.BlockDevice) { if !deviceMatchesSelector(&blockDevice) { - d.log.Debug("[RunBlockDeviceController] block device doesn't match labels and will be deleted") + log.Debug("block device doesn't match labels and will be deleted") blockDevicesToDelete = append(blockDevicesToDelete, &blockDevice) } } if !candidate.HasBlockDeviceDiff(blockDevice) { - d.log.Debug(fmt.Sprintf(`[RunBlockDeviceController] no data to update for block device, name: "%s"`, candidate.Name)) + log.Debug("no data to update for block device") addToDeleteListIfNotMatched(blockDevice) continue } if err = d.updateAPIBlockDevice(ctx, blockDevice, candidate); err != nil { - d.log.Error(err, "[RunBlockDeviceController] unable to update blockDevice, name: %s", blockDevice.Name) + log.Error(err, "unable to update blockDevice") continue } - d.log.Info(fmt.Sprintf(`[RunBlockDeviceController] updated APIBlockDevice, name: %s`, blockDevice.Name)) + log.Info("updated APIBlockDevice") addToDeleteListIfNotMatched(blockDevice) continue } device := candidate.AsAPIBlockDevice() if !deviceMatchesSelector(&device) { - d.log.Debug("[RunBlockDeviceController] block device doesn't match labels and will not be created") + log.Debug("block device doesn't match labels and will not be created", + "selector", selector, + "deviceLabels", device.Labels) continue } err := d.createAPIBlockDevice(ctx, &device) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunBlockDeviceController] unable to create block device blockDevice, name: %s", candidate.Name)) + log.Error(err, "unable to create block device") continue } - d.log.Info(fmt.Sprintf("[RunBlockDeviceController] created new APIBlockDevice: %s", candidate.Name)) + log.Info("created new APIBlockDevice") // add new api device to the map, so it won't be deleted as fantom apiBlockDevices[candidate.Name] = device } // delete devices doesn't match the filters - for _, device := range blockDevicesToDelete { - name := device.Name - err := d.deleteAPIBlockDevice(ctx, device) + for _, blockDeviceToDelete := range blockDevicesToDelete { + log := log.WithValues("blockDeviceToDelete", blockDeviceToDelete) + name := blockDeviceToDelete.Name + err := d.deleteAPIBlockDevice(ctx, blockDeviceToDelete) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunBlockDeviceController] unable to delete APIBlockDevice, name: %s", name)) + log.Error(err, "unable to delete APIBlockDevice") continue } delete(apiBlockDevices, name) - 
d.log.Info(fmt.Sprintf("[RunBlockDeviceController] device deleted, name: %s", name)) + log.Info("device deleted") } // delete api device if device no longer exists, but we still have its api resource d.removeDeprecatedAPIDevices(ctx, candidates, apiBlockDevices) - d.log.Info("[RunBlockDeviceController] END reconcile of block devices") + log.Info("END reconcile of block devices") d.metrics.ReconcileDuration(DiscovererName).Observe(d.metrics.GetEstimatedTimeInSeconds(reconcileStart)) d.metrics.ReconcilesCountTotal(DiscovererName).Inc() @@ -208,82 +215,91 @@ func (d *Discoverer) removeDeprecatedAPIDevices( candidates []internal.BlockDeviceCandidate, apiBlockDevices map[string]v1alpha1.BlockDevice, ) { + log := d.log.WithName("removeDeprecatedAPIDevices") actualCandidates := make(map[string]struct{}, len(candidates)) for _, candidate := range candidates { actualCandidates[candidate.Name] = struct{}{} } - for name, device := range apiBlockDevices { + for deviceName, device := range apiBlockDevices { + log := log.WithValues( + "deviceName", deviceName, + "device", device) if shouldDeleteBlockDevice(device, actualCandidates, d.cfg.NodeName) { err := d.deleteAPIBlockDevice(ctx, &device) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunBlockDeviceController] unable to delete APIBlockDevice, name: %s", name)) + log.Error(err, "unable to delete APIBlockDevice") continue } - delete(apiBlockDevices, name) - d.log.Info(fmt.Sprintf("[RunBlockDeviceController] device deleted, name: %s", name)) + delete(apiBlockDevices, deviceName) + log.Info("device deleted") } } } func (d *Discoverer) getBlockDeviceCandidates() ([]internal.BlockDeviceCandidate, error) { + log := d.log.WithName("getBlockDeviceCandidates") var candidates []internal.BlockDeviceCandidate devices, _ := d.sdsCache.GetDevices() if len(devices) == 0 { - d.log.Debug("[GetBlockDeviceCandidates] no devices found, returns empty candidates") + log.Debug("no devices found, returns empty candidates") return candidates, nil } filteredDevices, err := d.filterDevices(devices) if err != nil { - d.log.Error(err, "[GetBlockDeviceCandidates] unable to filter devices") + log.Error(err, "unable to filter devices") return nil, fmt.Errorf("filtering devices: %w", err) } if len(filteredDevices) == 0 { - d.log.Debug("[GetBlockDeviceCandidates] no filtered devices left, returns empty candidates") + log.Debug("no filtered devices left, returns empty candidates") return candidates, nil } pvs, _ := d.sdsCache.GetPVs() if len(pvs) == 0 { - d.log.Debug("[GetBlockDeviceCandidates] no PVs found") + log.Debug("no PVs found") } var delFlag bool candidates = make([]internal.BlockDeviceCandidate, 0, len(filteredDevices)) for _, device := range filteredDevices { - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] Process device: %+v", device)) + log := log.WithValues("device", device) + log.Trace("Processing device") candidate := internal.NewBlockDeviceCandidateByDevice(&device, d.cfg.NodeName, d.cfg.MachineID) - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] Get following candidate: %+v", candidate)) + log.Trace("Get candidate", "candidate", candidate) candidateName := d.createCandidateName(candidate, devices) if candidateName == "" { - d.log.Trace("[GetBlockDeviceCandidates] candidateName is empty. Skipping device") + log.Trace("candidateName is empty. 
Skipping device") continue } candidate.Name = candidateName - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] Generated a unique candidate name: %s", candidate.Name)) + log.Trace("Generated a unique candidate name", "name", candidate.Name) delFlag = false for _, pv := range pvs { if pv.PVName == device.Name { - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] The device is a PV. Found PV name: %s", pv.PVName)) + log := log.WithValues("pvName", pv.PVName) + log.Trace("The device is a PV") if candidate.FSType == internal.LVMFSType { hasTag, lvmVGName := utils.ReadValueFromTags(pv.VGTags, internal.LVMVolumeGroupTag) if hasTag { - d.log.Debug(fmt.Sprintf("[GetBlockDeviceCandidates] PV %s of BlockDevice %s has tag, fill the VG information", pv.PVName, candidate.Name)) + log.Debug("PV of BlockDevice has tag, fill the VG information") candidate.PVUuid = pv.PVUuid candidate.VGUuid = pv.VGUuid candidate.ActualVGNameOnTheNode = pv.VGName candidate.LVMVolumeGroupName = lvmVGName } else { if len(pv.VGName) != 0 { - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] The device is a PV with VG named %s that lacks our tag %s. Removing it from Kubernetes", pv.VGName, internal.LVMTags[0])) + log.Trace("The device is a PV with VG that lacks our tag. Removing it from Kubernetes", + "vgName", pv.VGName, + "tags", internal.LVMTags) delFlag = true } else { candidate.PVUuid = pv.PVUuid @@ -292,11 +308,11 @@ func (d *Discoverer) getBlockDeviceCandidates() ([]internal.BlockDeviceCandidate } } } - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] delFlag: %t", delFlag)) if delFlag { + log.Trace("has delFlag. Skipping") continue } - d.log.Trace(fmt.Sprintf("[GetBlockDeviceCandidates] configured candidate %+v", candidate)) + log.Trace("configured candidate") candidates = append(candidates, candidate) } @@ -338,7 +354,8 @@ func visitParents(devicesByKName map[string]*internal.Device, device *internal.D // In mpath case we should copy serial and wwn from the parent device // Also mpath devices appears once but their parents multiple times. 
So only way to filter them out is to remove them by "fstype": "mpath_member" func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device, error) { - d.log.Trace(fmt.Sprintf("[filterDevices] devices before type filtration: %+v", devices)) + log := d.log.WithName("filterDevices") + log.Trace("devices before type filtration", "devices", devices) filteredDevices := slices.Clone(devices) start := time.Now() @@ -350,17 +367,18 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device } firstDevice, alreadyExists := devicesByKName[device.KName] if alreadyExists { - d.log.Error(ErrDeviceListInvalid, "second device with same kname", "first", firstDevice, "second", device) + log.Error(ErrDeviceListInvalid, "second device with same kname", "first", firstDevice, "second", device) return devices, fmt.Errorf("%w: second device with kname %s found", ErrDeviceListInvalid, device.KName) } devicesByKName[device.KName] = &device } - d.log.Trace("[filterDevices] Made map by KName", "duration", time.Since(start)) + log.Trace("Made map by KName", "duration", time.Since(start)) start = time.Now() // feel up missing serial and wwn for mpath and partitions for i := range filteredDevices { device := &filteredDevices[i] + log := log.WithValues("device", device) if device.Serial == "" { found, err := visitParents(devicesByKName, device, func(parent *internal.Device) bool { @@ -380,7 +398,7 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device } if !found { - d.log.Trace(fmt.Sprintf("[filterDevices] Can't find serial for device %s, kname: %s, pkname: %s", device.Name, device.KName, device.PkName)) + log.Trace("Can't find serial for device") } } @@ -402,19 +420,20 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device } if !found { - d.log.Trace(fmt.Sprintf("[filterDevices] Can't find wwn for device %s, kname: %s, pkname: %s", device.Name, device.KName, device.PkName)) + log.Trace("Can't find wwn for device") } } } - d.log.Trace("Found missing Serial and Wwn", "duration", time.Since(start)) + log.Trace("Found missing Serial and Wwn", "duration", time.Since(start)) // deleting parent devices // making pkname set pkNames := make(map[string]struct{}, len(filteredDevices)) for _, device := range filteredDevices { + log := log.WithValues("device", device) if device.PkName != "" { - d.log.Trace(fmt.Sprintf("[filterDevices] find parent %s for child : %+v.", device.PkName, device)) + log.Trace("find parent for child") pkNames[device.PkName] = struct{}{} } } @@ -422,30 +441,27 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device filteredDevices = slices.DeleteFunc( filteredDevices, func(device internal.Device) bool { + log := log.WithValues("device", device) if device.FSType == "mpath_member" { - d.log.Trace("[filterDevices] filtered out", "name", device.Name, "kname", device.KName, "reason", "mpath_member") + log.Trace("filtered out", "reason", "mpath_member") return true } if strings.HasPrefix(device.Name, internal.DRBDName) { - d.log.Trace("[filterDevices] filtered out", "name", device.Name, "kname", device.KName, "reason", "drbd") + log.Trace("filtered out", "reason", "drbd") return true } if !hasValidType(device.Type) { - d.log.Trace( - "[filterDevices] filtered out", - "name", device.Name, - "kname", device.KName, + log.Trace( + "filtered out", "reason", "type", "type", device.Type, ) return true } if !hasValidFSType(device.FSType) { - d.log.Trace( - "[filterDevices] filtered out", - 
"name", device.Name, - "kname", device.KName, + log.Trace( + "filtered out", "reason", "fstype", "fstype", device.FSType, ) @@ -454,10 +470,8 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device _, hasChildren := pkNames[device.KName] if hasChildren && device.FSType != internal.LVMFSType { - d.log.Trace( - "[filterDevices] filtered out", - "name", device.Name, - "kname", device.KName, + log.Trace( + "filtered out", "reason", "has children but not LVM", "fstype", device.FSType, "has_children", hasChildren, @@ -467,10 +481,8 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device validSize, err := hasValidSize(device.Size) if err != nil || !validSize { - d.log.Trace( - "[filterDevices] filtered out", - "name", device.Name, - "kname", device.KName, + log.Trace( + "filtered out", "reason", "invalid size", "size", device.Size, ) @@ -481,55 +493,65 @@ func (d *Discoverer) filterDevices(devices []internal.Device) ([]internal.Device }, ) - d.log.Trace(fmt.Sprintf("[filterDevices] final filtered devices: %+v", filteredDevices)) + log.Trace("final filtered devices", "devices", filteredDevices) return filteredDevices, nil } func (d *Discoverer) createCandidateName(candidate internal.BlockDeviceCandidate, devices []internal.Device) string { + log := d.log.WithName("createCandidateName").WithValues("path", candidate.Path) if len(candidate.Serial) == 0 { - d.log.Trace(fmt.Sprintf("[CreateCandidateName] Serial number is empty for device: %s", candidate.Path)) + log.Trace("Serial number is empty for device") if candidate.Type == internal.PartType { if len(candidate.PartUUID) == 0 { - d.log.Warning(fmt.Sprintf("[CreateCandidateName] Type = part and cannot get PartUUID; skipping this device, path: %s", candidate.Path)) + log.Warning("Type = part and cannot get PartUUID; skipping this device") return "" } - d.log.Trace(fmt.Sprintf("[CreateCandidateName] Type = part and PartUUID is not empty; skiping getting serial number for device: %s", candidate.Path)) + log.Trace("Type = part and PartUUID is not empty; skiping getting serial number for device") } else { - d.log.Debug(fmt.Sprintf("[CreateCandidateName] Serial number is empty and device type is not part; trying to obtain serial number or its equivalent for device: %s, with type: %s", candidate.Path, candidate.Type)) + log.Debug("Serial number is empty and device type is not part; trying to obtain serial number or its equivalent", + "type", candidate.Type) switch candidate.Type { case internal.MultiPathType: - d.log.Debug(fmt.Sprintf("[CreateCandidateName] device %s type = %s; get serial number from parent device.", candidate.Path, candidate.Type)) - d.log.Trace(fmt.Sprintf("[CreateCandidateName] device: %+v. Device list: %+v", candidate, devices)) + log.Debug("device type = MultiPath; get serial number from parent device", + "type", candidate.Type) + log.Trace("device and device list", + "candidate", candidate, + "devices", devices) serial, err := getSerialForMultipathDevice(candidate, devices) if err != nil { - d.log.Warning(fmt.Sprintf("[CreateCandidateName] Unable to obtain serial number or its equivalent; skipping device: %s. 
Error: %s", candidate.Path, err)) + log.Warning("Unable to obtain serial number or its equivalent; skipping device", + "error", err) return "" } candidate.Serial = serial - d.log.Info(fmt.Sprintf("[CreateCandidateName] Successfully obtained serial number or its equivalent: %s for device: %s", candidate.Serial, candidate.Path)) + log.Info("Successfully obtained serial number or its equivalent", + "serial", candidate.Serial) default: isMdRaid := false matched, err := regexp.MatchString(`raid.*`, candidate.Type) if err != nil { - d.log.Error(err, "[CreateCandidateName] failed to match regex - unable to determine if the device is an mdraid. Attempting to retrieve serial number directly from the device") + log.Error(err, "failed to match regex - unable to determine if the device is an mdraid. Attempting to retrieve serial number directly from the device") } else if matched { - d.log.Trace("[CreateCandidateName] device is mdraid") + log.Trace("device is mdraid") isMdRaid = true } serial, err := readSerialBlockDevice(candidate.Path, isMdRaid) if err != nil { - d.log.Warning(fmt.Sprintf("[CreateCandidateName] Unable to obtain serial number or its equivalent; skipping device: %s. Error: %s", candidate.Path, err)) + log.Warning("Unable to obtain serial number or its equivalent; skipping device", + "error", err) return "" } - d.log.Info(fmt.Sprintf("[CreateCandidateName] Successfully obtained serial number or its equivalent: %s for device: %s", serial, candidate.Path)) + log.Info("Successfully obtained serial number or its equivalent", + "serial", serial) candidate.Serial = serial } } } - d.log.Trace(fmt.Sprintf("[CreateCandidateName] Serial number is now: %s. Creating candidate name", candidate.Serial)) + log.Trace("Serial number is now set. Creating candidate name", + "serial", candidate.Serial) return createUniqDeviceName(candidate) } @@ -630,11 +652,6 @@ func hasValidSize(size resource.Quantity) (bool, error) { return size.Value() >= limitSize.Value(), nil } -func isParent(kName string, pkNames map[string]struct{}) bool { - _, ok := pkNames[kName] - return ok -} - func hasValidType(deviceType string) bool { for _, invalidType := range internal.InvalidDeviceTypes { if deviceType == invalidType { diff --git a/images/agent/internal/controller/controller.go b/images/agent/internal/controller/controller.go index cec2a4ca1..2f08269d0 100644 --- a/images/agent/internal/controller/controller.go +++ b/images/agent/internal/controller/controller.go @@ -18,7 +18,6 @@ package controller import ( "context" - "fmt" "reflect" "time" @@ -75,7 +74,7 @@ func AddReconciler[T client.Object]( panic("T is not a struct pointer") } - tname := t.Elem().Name() + typeName := t.Elem().Name() c, err := controller.New( reconciler.Name(), @@ -102,34 +101,36 @@ func AddReconciler[T client.Object]( e event.TypedCreateEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request], ) { + objLog := log.WithName("CreateFunc").WithValues("type", typeName, "name", e.Object.GetName()) if !reconciler.ShouldReconcileCreate(e.Object) { - log.Debug(fmt.Sprintf("createFunc skipped a request for the %s %s to the Reconcilers queue", tname, e.Object.GetName())) + objLog.Debug("createFunc skipped a request to the Reconcilers queue") return } - log.Info(fmt.Sprintf("createFunc got a create event for the %s, name: %s", tname, e.Object.GetName())) + objLog.Info("createFunc got a create event") request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) - 
log.Info(fmt.Sprintf("createFunc added a request for the %s %s to the Reconcilers queue", tname, e.Object.GetName())) + objLog.Info("createFunc added a request to the Reconcilers queue") }, UpdateFunc: func( _ context.Context, e event.TypedUpdateEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request], ) { - log.Info(fmt.Sprintf("UpdateFunc got a update event for the %s %s", tname, e.ObjectNew.GetName())) + objLog := log.WithName("UpdateFunc").WithValues("type", typeName, "name", e.ObjectNew.GetName()) + objLog.Info("UpdateFunc got a update event") if !reconciler.ShouldReconcileUpdate(e.ObjectOld, e.ObjectNew) { - log.Debug(fmt.Sprintf("updateFunc skipped a request for the %s %s to the Reconcilers queue", tname, e.ObjectNew.GetName())) + objLog.Debug("updateFunc skipped a request to the Reconcilers queue") return } request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("updateFunc added a request for the %s %s to the Reconcilers queue", tname, e.ObjectNew.GetName())) + objLog.Info("updateFunc added a request to the Reconcilers queue") }, }, ), @@ -160,7 +161,8 @@ func AddDiscoverer( func makeDiscovererDispatcher(log logger.Logger, discoverer Discoverer) reconcile.Func { return reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[DiscovererDispatcher] %s discoverer starts", discoverer.Name())) + log := log.WithName("DiscovererDispatcher").WithValues("discovererName", discoverer.Name()) + log.Info("discoverer starts") result, err := discoverer.Discover(ctx) @@ -176,18 +178,19 @@ func makeReconcileDispatcher[T client.Object]( cl := mgr.GetClient() return reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { // load object being reconciled - log.Info(fmt.Sprintf("[ReconcileDispatcher] Reconciler starts to reconcile the request %s", req.NamespacedName.String())) + log := log.WithName("ReconcileDispatcher").WithValues("namespacedName", req.NamespacedName) + log.Info("Reconciler starts to reconcile the request") t := reflect.TypeFor[T]() obj := reflect.New(t.Elem()).Interface().(T) if err := cl.Get(ctx, req.NamespacedName, obj); err != nil { if errors.IsNotFound(err) { - log.Warning(fmt.Sprintf("[ReconcileDispatcher] seems like the object was deleted as unable to get it, err: %s. Stop to reconcile", err.Error())) + log.Warning("seems like the object was deleted as unable to get it. 
Stop to reconcile", "error", err) return reconcile.Result{}, nil } - log.Error(err, fmt.Sprintf("[ReconcileDispatcher] unable to get an object by NamespacedName %s", req.NamespacedName.String())) + log.Error(err, "unable to get an object by NamespacedName") return reconcile.Result{}, err } diff --git a/images/agent/internal/controller/llv/llvs_ee.go b/images/agent/internal/controller/llv/llvs_ee.go index 51bf76cef..e99159cc9 100644 --- a/images/agent/internal/controller/llv/llvs_ee.go +++ b/images/agent/internal/controller/llv/llvs_ee.go @@ -10,7 +10,6 @@ package llv import ( "context" "errors" - "fmt" "k8s.io/apimachinery/pkg/types" @@ -19,13 +18,14 @@ import ( ) func (r *Reconciler) handleLLVSSource(ctx context.Context, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (string, bool, error) { + log := r.log.WithName("handleLLVSSource").WithValues("llvName", llv.Name, "sourceName", llv.Spec.Source.Name) if !feature.SnapshotsEnabled() { return "", false, errors.New("LVMLocalVolumeSnapshot as a source is not supported: snapshot feature is disabled") } sourceLLVS := &v1alpha1.LVMLogicalVolumeSnapshot{} if err := r.cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceLLVS); err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get source LVMLogicalVolumeSnapshot %s for the LVMLogicalVolume %s", llv.Spec.Source.Name, llv.Name)) + log.Error(err, "unable to get source LVMLogicalVolumeSnapshot") return "", true, err } diff --git a/images/agent/internal/controller/llv/reconciler.go b/images/agent/internal/controller/llv/reconciler.go index 900dcf476..b18157f1d 100644 --- a/images/agent/internal/controller/llv/reconciler.go +++ b/images/agent/internal/controller/llv/reconciler.go @@ -156,7 +156,9 @@ func (r *Reconciler) ShouldReconcileCreate(_ *v1alpha1.LVMLogicalVolume) bool { // ShouldReconcileUpdate implements controller.Reconciler. func (r *Reconciler) ShouldReconcileUpdate(objectOld *v1alpha1.LVMLogicalVolume, objectNew *v1alpha1.LVMLogicalVolume) bool { - r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] got an update event for the LVMLogicalVolume: %s", objectNew.GetName())) + log := r.log.WithName("ShouldReconcileUpdate") + log.Info("got an update event for the LVMLogicalVolume", + "llvName", objectNew.GetName()) // TODO: Figure out how to log it in our logger. if r.cfg.Loglevel == "4" { @@ -166,7 +168,8 @@ func (r *Reconciler) ShouldReconcileUpdate(objectOld *v1alpha1.LVMLogicalVolume, } if reflect.DeepEqual(objectOld.Spec, objectNew.Spec) && objectNew.DeletionTimestamp == nil { - r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] no target changes were made for the LVMLogicalVolume %s. No need to reconcile the request", objectNew.Name)) + log.Info("no target changes were made for the LVMLogicalVolume. 
No need to reconcile the request", + "llvName", objectNew.Name) return false } @@ -179,12 +182,19 @@ func (r *Reconciler) Reconcile( req controller.ReconcileRequest[*v1alpha1.LVMLogicalVolume], ) (controller.Result, error) { llv := req.Object - r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] Reconciler starts reconciliation of the LVMLogicalVolume: %s", llv.Name)) + log := r.log.WithName("Reconcile") + if llv != nil { + log = log.WithValues( + "llvName", llv.Name, + "lvgName", llv.Spec.LVMVolumeGroupName) + } + log.Info("Reconciler starts reconciliation of the LVMLogicalVolume") lvg, err := r.lvgCl.GetLVMVolumeGroup(ctx, llv.Spec.LVMVolumeGroupName) if err != nil { if k8serr.IsNotFound(err) { - r.log.Error(err, fmt.Sprintf("[Reconcile] LVMVolumeGroup %s not found for LVMLogicalVolume %s. Retry in %s", llv.Spec.LVMVolumeGroupName, llv.Name, r.cfg.VolumeGroupScanInterval.String())) + log.Error(err, "LVMVolumeGroup not found for LVMLogicalVolume", + "retryIn", r.cfg.VolumeGroupScanInterval) err = r.llvCl.UpdatePhaseIfNeeded( ctx, llv, @@ -192,7 +202,7 @@ func (r *Reconciler) Reconcile( fmt.Sprintf("LVMVolumeGroup %s not found", llv.Spec.LVMVolumeGroupName), ) if err != nil { - r.log.Error(err, fmt.Sprintf("[Reconcile] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return controller.Result{}, err } @@ -208,74 +218,75 @@ func (r *Reconciler) Reconcile( fmt.Sprintf("Unable to get selected LVMVolumeGroup, err: %s", err.Error()), ) if err != nil { - r.log.Error(err, fmt.Sprintf("[Reconcile] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") } return controller.Result{}, err } if !utils.LVGBelongsToNode(lvg, r.cfg.NodeName) { - r.log.Info(fmt.Sprintf("[Reconcile] the LVMVolumeGroup %s of the LVMLogicalVolume %s does not belongs to the current node: %s. Reconciliation stopped", lvg.Name, llv.Name, r.cfg.NodeName)) + log.Info("the LVMVolumeGroup of the LVMLogicalVolume does not belongs to the current node. Reconciliation stopped", "nodeName", r.cfg.NodeName) return controller.Result{}, nil } - r.log.Info(fmt.Sprintf("[Reconcile] the LVMVolumeGroup %s of the LVMLogicalVolume %s belongs to the current node: %s. Reconciliation continues", lvg.Name, llv.Name, r.cfg.NodeName)) + log.Info("the LVMVolumeGroup of the LVMLogicalVolume belongs to the current node. Reconciliation continues", "nodeName", r.cfg.NodeName) // this case prevents the unexpected behavior when the controller runs up with existing LVMLogicalVolumes if vgs, _ := r.sdsCache.GetVGs(); len(vgs) == 0 { - r.log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeWatcherController] unable to reconcile the request as no VG was found in the cache. 
Retry in %s", r.cfg.VolumeGroupScanInterval.String())) + log.Warning("unable to reconcile the request as no VG was found in the cache", "retryIn", r.cfg.VolumeGroupScanInterval) return controller.Result{RequeueAfter: r.cfg.VolumeGroupScanInterval}, nil } - r.log.Debug(fmt.Sprintf("[Reconcile] tries to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + log.Debug("tries to add the finalizer to the LVMLogicalVolume", "finalizer", internal.SdsNodeConfiguratorFinalizer) added, err := r.addLLVFinalizerIfNotExist(ctx, llv) if err != nil { - r.log.Error(err, fmt.Sprintf("[Reconcile] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return controller.Result{}, err } if added { - r.log.Debug(fmt.Sprintf("[Reconcile] successfully added the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + log.Debug("successfully added the finalizer to the LVMLogicalVolume", "finalizer", internal.SdsNodeConfiguratorFinalizer) } else { - r.log.Debug(fmt.Sprintf("[Reconcile] no need to add the finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + log.Debug("no need to add the finalizer to the LVMLogicalVolume", "finalizer", internal.SdsNodeConfiguratorFinalizer) } - r.log.Info(fmt.Sprintf("[Reconcile] starts to validate the LVMLogicalVolume %s", llv.Name)) + log.Info("starts to validate the LVMLogicalVolume") valid, reason := r.validateLVMLogicalVolume(llv, lvg) if !valid { - r.log.Warning(fmt.Sprintf("[Reconcile] the LVMLogicalVolume %s is not valid, reason: %s", llv.Name, reason)) + log.Warning("the LVMLogicalVolume is not valid", "reason", reason) err = r.llvCl.UpdatePhaseIfNeeded(ctx, llv, v1alpha1.PhaseFailed, reason) if err != nil { - r.log.Error(err, fmt.Sprintf("[Reconcile] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return controller.Result{}, err } return controller.Result{}, nil } - r.log.Info(fmt.Sprintf("[Reconcile] successfully validated the LVMLogicalVolume %s", llv.Name)) + log.Info("successfully validated the LVMLogicalVolume") shouldRequeue, err := r.ReconcileLVMLogicalVolume(ctx, llv, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[Reconcile] an error occurred while reconciling the LVMLogicalVolume: %s", llv.Name)) + log.Error(err, "an error occurred while reconciling the LVMLogicalVolume") if !errors.Is(err, errAlreadyRunning) && !errors.Is(err, errCleanupSameAsPreviouslyFailed) { updErr := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, v1alpha1.PhaseFailed, err.Error()) if updErr != nil { - r.log.Error(updErr, fmt.Sprintf("[Reconcile] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(updErr, "unable to update the LVMLogicalVolume") return controller.Result{}, updErr } } } if shouldRequeue { - r.log.Info(fmt.Sprintf("[Reconcile] some issues were occurred while reconciliation the LVMLogicalVolume %s. 
Requeue the request in %s", llv.Name, r.cfg.LLVRequeueInterval.String())) + log.Info("some issues were occurred while reconciliation the LVMLogicalVolume", "requeueIn", r.cfg.LLVRequeueInterval) return controller.Result{RequeueAfter: r.cfg.LLVRequeueInterval}, nil } - r.log.Info(fmt.Sprintf("[Reconcile] successfully ended reconciliation of the LVMLogicalVolume %s", llv.Name)) + log.Info("successfully ended reconciliation of the LVMLogicalVolume") return controller.Result{}, nil } func (r *Reconciler) ReconcileLVMLogicalVolume(ctx context.Context, llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] starts the reconciliation for the LVMLogicalVolume %s", llv.Name)) + log := r.log.WithName("ReconcileLVMLogicalVolume").WithValues("llvName", llv.Name) + log.Debug("starts the reconciliation for the LVMLogicalVolume") - r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolume] tries to identify the reconciliation type for the LVMLogicalVolume %s", llv.Name)) - r.log.Trace(fmt.Sprintf("[ReconcileLVMLogicalVolume] %+v", llv)) + log.Debug("tries to identify the reconciliation type for the LVMLogicalVolume") + log.Trace("LVMLogicalVolume", "llv", llv) switch r.identifyReconcileFunc(lvg.Spec.ActualVGNameOnTheNode, llv) { case internal.CreateReconcile: @@ -285,9 +296,9 @@ func (r *Reconciler) ReconcileLVMLogicalVolume(ctx context.Context, llv *v1alpha case internal.DeleteReconcile: return r.reconcileLLVDeleteFunc(ctx, llv, lvg) default: - r.log.Info(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s has completed configuration and should not be reconciled", llv.Name)) + log.Info("the LVMLogicalVolume has completed configuration and should not be reconciled") if llv.Status.Phase != v1alpha1.PhaseCreated { - r.log.Warning(fmt.Sprintf("[runEventReconcile] the LVMLogicalVolume %s should not be reconciled but has an unexpected phase: %s. Setting the phase to %s", llv.Name, llv.Status.Phase, v1alpha1.PhaseCreated)) + log.Warning("the LVMLogicalVolume should not be reconciled but has an unexpected phase. 
Setting the phase to Created", "currentPhase", llv.Status.Phase, "expectedPhase", v1alpha1.PhaseCreated) err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, v1alpha1.PhaseCreated, "") if err != nil { return true, err @@ -303,29 +314,38 @@ func (r *Reconciler) reconcileLLVCreateFunc( llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup, ) (bool, error) { - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) + log := r.log.WithName("reconcileLLVCreateFunc").WithValues( + "llvName", llv.Name, + "lvName", llv.Spec.ActualLVNameOnTheNode, + "vgName", lvg.Spec.ActualVGNameOnTheNode) + log.Debug("starts reconciliation for the LVMLogicalVolume") // this check prevents infinite resource updating after retries if llv.Status == nil { err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, v1alpha1.PhasePending, "") if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return true, err } } llvRequestSize, err := utils.GetLLVRequestedSize(llv, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) + log.Error(err, "unable to get LVMLogicalVolume requested size") return false, err } freeSpace := utils.GetFreeLVGSpaceForLLV(lvg, llv) - r.log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s type: %s requested size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, llvRequestSize.String(), freeSpace.String())) + log.Trace("the LVMLogicalVolume", + "type", llv.Spec.Type, + "requestedSize", llvRequestSize, + "freeSpace", freeSpace) if !utils.AreSizesEqualWithinDelta(llvRequestSize, freeSpace, internal.ResizeDelta) { if freeSpace.Value() < llvRequestSize.Value()+internal.ResizeDelta.Value() { err = errors.New("not enough space") - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than the actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) + log.Error(err, "the LV requested size of the LVMLogicalVolume is more than the actual free space", + "requestedSize", llvRequestSize, + "freeSpace", freeSpace) // we return true cause the user might manage LVMVolumeGroup free space without changing the LLV return true, err @@ -335,15 +355,19 @@ func (r *Reconciler) reconcileLLVCreateFunc( var cmd string switch { case llv.Spec.Type == internal.Thick: - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s will be created in VG %s with size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llvRequestSize.String())) + log.Debug("LV will be created in VG with size", + "size", llvRequestSize) cmd, err = r.commands.CreateThickLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value(), isContiguous(llv)) case llv.Spec.Source == nil: - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] LV %s of the LVMLogicalVolume %s will be created in Thin-pool %s with size %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llv.Spec.Thin.PoolName, llvRequestSize.String())) + log.Debug("LV of the LVMLogicalVolume will be created in Thin-pool with size", + "thinPoolName", llv.Spec.Thin.PoolName, + "size", llvRequestSize) cmd, err = r.commands.CreateThinLogicalVolume(lvg.Spec.ActualVGNameOnTheNode, 
llv.Spec.Thin.PoolName, llv.Spec.ActualLVNameOnTheNode, llvRequestSize.Value()) case llv.Spec.Source.Kind == "LVMLogicalVolume": sourceLLV := &v1alpha1.LVMLogicalVolume{} if err := r.cl.Get(ctx, types.NamespacedName{Name: llv.Spec.Source.Name}, sourceLLV); err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get source LVMLogicalVolume %s for the LVMLogicalVolume %s", llv.Spec.Source.Name, llv.Name)) + log.Error(err, "unable to get source LVMLogicalVolume for the LVMLogicalVolume", + "sourceLLVName", llv.Spec.Source.Name) return true, err } @@ -359,30 +383,30 @@ func (r *Reconciler) reconcileLLVCreateFunc( } cmd = cmdTmp } - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] ran cmd: %s", cmd)) + log.Debug("ran cmd", "cmd", cmd) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to create a %s LogicalVolume for the LVMLogicalVolume %s", llv.Spec.Type, llv.Name)) + log.Error(err, "unable to create a LogicalVolume for the LVMLogicalVolume", "type", llv.Spec.Type) return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully created LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) + log.Info("successfully created LV in VG for LVMLogicalVolume resource") - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] adds the LV %s to the cache", llv.Spec.ActualLVNameOnTheNode)) + log.Debug("adds the LV to the cache") r.sdsCache.AddLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] tries to get the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) + log.Debug("tries to get the LV actual size") actualSize := r.getLVActualSize(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if actualSize.Value() == 0 { - r.log.Warning(fmt.Sprintf("[reconcileLLVCreateFunc] unable to get actual size for LV %s in VG %s (likely LV was not found in the cache), retry...", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + log.Debug("unable to get actual size for LV in VG (likely LV was not found in the cache), retry...") return true, nil } - r.log.Debug(fmt.Sprintf("[reconcileLLVCreateFunc] successfully got the LV %s actual size", llv.Spec.ActualLVNameOnTheNode)) - r.log.Trace(fmt.Sprintf("[reconcileLLVCreateFunc] the LV %s in VG: %s has actual size: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String())) + log.Debug("successfully got the LV actual size") + log.Trace("the LV in VG has actual size", "actualSize", actualSize) if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, llv, actualSize); err != nil { return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVCreateFunc] successfully ended the reconciliation for the LVMLogicalVolume %s", llv.Name)) + log.Info("successfully ended the reconciliation for the LVMLogicalVolume") return false, nil } @@ -391,105 +415,119 @@ func (r *Reconciler) reconcileLLVUpdateFunc( llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup, ) (bool, error) { - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) + log := r.log.WithName("reconcileLLVUpdateFunc").WithValues( + "llvName", llv.Name, + "lvName", llv.Spec.ActualLVNameOnTheNode, + "vgName", lvg.Spec.ActualVGNameOnTheNode) + log.Debug("starts reconciliation for the LVMLogicalVolume") // status might be nil if a user creates the resource with LV name which matches existing LV on the 
node if llv.Status == nil { err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, v1alpha1.PhasePending, "") if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return true, err } } // it needs to get current LV size from the node as status might be nil - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size before the extension", llv.Name)) + log.Debug("tries to get LVMLogicalVolume actual size before the extension") actualSize := r.getLVActualSize(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if actualSize.Value() == 0 { - r.log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s has zero size (likely LV was not updated in the cache) ", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + log.Warning("LV of the LVMLogicalVolume has zero size (likely LV was not updated in the cache)") return true, nil } - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size %s before the extension", llv.Name, actualSize.String())) + log.Debug("successfully got LVMLogicalVolume actual size before the extension", "actualSize", actualSize) - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to count the LVMLogicalVolume %s requested size", llv.Name)) + log.Debug("tries to count the LVMLogicalVolume requested size") llvRequestSize, err := utils.GetLLVRequestedSize(llv, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVCreateFunc] unable to get LVMLogicalVolume %s requested size", llv.Name)) + log.Error(err, "unable to get LVMLogicalVolume requested size") return false, err } - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully counted the LVMLogicalVolume %s requested size: %s", llv.Name, llvRequestSize.String())) + log.Debug("successfully counted the LVMLogicalVolume requested size", "requestedSize", llvRequestSize) if utils.AreSizesEqualWithinDelta(actualSize, llvRequestSize, internal.ResizeDelta) { - r.log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has the same actual size %s as the requested size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, actualSize.String(), llvRequestSize.String())) + log.Warning("the LV in VG has the same actual size as the requested size", + "actualSize", actualSize, + "requestedSize", llvRequestSize) if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, llv, actualSize); err != nil { return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) + log.Info("successfully ended reconciliation for the LVMLogicalVolume") return false, nil } extendingSize := subtractQuantity(llvRequestSize, actualSize) - r.log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s has extending size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, extendingSize.String())) + log.Trace("the LV in VG has extending size", + "extendingSize", extendingSize) if extendingSize.Value() < 0 { err = fmt.Errorf("specified LV size %dB is less than actual one on the node %dB", llvRequestSize.Value(), actualSize.Value()) - r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to extend the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to extend the LVMLogicalVolume") return false, err } - r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s should be 
resized", llv.Name)) + log.Info("the LVMLogicalVolume should be resized") // this check prevents infinite resource updates after retry if llv.Status.Phase != v1alpha1.PhaseFailed { err := r.llvCl.UpdatePhaseIfNeeded(ctx, llv, v1alpha1.PhaseResizing, "") if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return true, err } } freeSpace := utils.GetFreeLVGSpaceForLLV(lvg, llv) - r.log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LVMLogicalVolume %s, LV: %s, VG: %s, type: %s, extending size: %s, free space: %s", llv.Name, llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Spec.Type, extendingSize.String(), freeSpace.String())) + log.Trace("the LVMLogicalVolume", + "type", llv.Spec.Type, + "extendingSize", extendingSize, + "freeSpace", freeSpace) if !utils.AreSizesEqualWithinDelta(freeSpace, extendingSize, internal.ResizeDelta) { if freeSpace.Value() < extendingSize.Value()+internal.ResizeDelta.Value() { err = errors.New("not enough space") - r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s requested size %s of the LVMLogicalVolume %s is more than actual free space %s", llv.Spec.ActualLVNameOnTheNode, llvRequestSize.String(), llv.Name, freeSpace.String())) + log.Error(err, "the LV requested size of the LVMLogicalVolume is more than actual free space", + "requestedSize", llvRequestSize, + "freeSpace", freeSpace) // returns true cause a user might manage LVG free space without changing the LLV return true, err } } - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s will be extended with size: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, llvRequestSize.String())) + log.Debug("LV of the LVMLogicalVolume will be extended with size", + "size", llvRequestSize) cmd, err := r.commands.ExtendLV(llvRequestSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] runs cmd: %s", cmd)) + log.Debug("runs cmd", "cmd", cmd) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVUpdateFunc] unable to ExtendLV, name: %s, type: %s", llv.Spec.ActualLVNameOnTheNode, llv.Spec.Type)) + log.Error(err, "unable to ExtendLV", "name", llv.Spec.ActualLVNameOnTheNode, "type", llv.Spec.Type) return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully extended LV %s in VG %s for LVMLogicalVolume resource with name: %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, llv.Name)) + log.Info("successfully extended LV in VG for LVMLogicalVolume resource", "lvName", llv.Spec.ActualLVNameOnTheNode, "vgName", lvg.Spec.ActualVGNameOnTheNode) - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] tries to get LVMLogicalVolume %s actual size after the extension", llv.Name)) + log.Debug("tries to get LVMLogicalVolume actual size after the extension") newActualSize := r.getLVActualSize(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) // this case might be triggered if sds cache will not update lv state in time if newActualSize.Value() == actualSize.Value() { - r.log.Warning(fmt.Sprintf("[reconcileLLVUpdateFunc] LV %s of the LVMLogicalVolume %s was extended but cache is not updated yet. It will be retried", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + log.Warning("LV of the LVMLogicalVolume was extended but cache is not updated yet. 
It will be retried") return true, nil } - r.log.Debug(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully got LVMLogicalVolume %s actual size before the extension", llv.Name)) - r.log.Trace(fmt.Sprintf("[reconcileLLVUpdateFunc] the LV %s in VG %s actual size %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode, newActualSize.String())) + log.Debug("successfully got LVMLogicalVolume actual size after the extension") + log.Trace("the LV in VG actual size", + "actualSize", newActualSize) // need this here as a user might create the LLV with existing LV if err := r.llvCl.UpdatePhaseToCreatedIfNeeded(ctx, llv, newActualSize); err != nil { return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVUpdateFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) + log.Info("successfully ended reconciliation for the LVMLogicalVolume") return false, nil } @@ -498,32 +536,33 @@ func (r *Reconciler) reconcileLLVDeleteFunc( llv *v1alpha1.LVMLogicalVolume, lvg *v1alpha1.LVMVolumeGroup, ) (bool, error) { - r.log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] starts reconciliation for the LVMLogicalVolume %s", llv.Name)) + log := r.log.WithName("reconcileLLVDeleteFunc").WithValues("llvName", llv.Name) + log.Debug("starts reconciliation for the LVMLogicalVolume") // The controller won't remove the LLV resource and LV volume till the resource has any other finalizer. if len(llv.Finalizers) != 0 { if len(llv.Finalizers) > 1 || llv.Finalizers[0] != internal.SdsNodeConfiguratorFinalizer { - r.log.Debug(fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete LVMLogicalVolume %s for now due to it has any other finalizer", llv.Name)) + log.Debug("unable to delete LVMLogicalVolume for now due to it has any other finalizer") return false, nil } } shouldRequeue, err := r.deleteLVIfNeeded(ctx, lvg.Spec.ActualVGNameOnTheNode, llv) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to delete the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + log.Error(err, "unable to delete the LV in VG", "lvName", llv.Spec.ActualLVNameOnTheNode, "vgName", lvg.Spec.ActualVGNameOnTheNode) return shouldRequeue, err } - r.log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully deleted the LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, lvg.Spec.ActualVGNameOnTheNode)) + log.Info("successfully deleted the LV in VG", "lvName", llv.Spec.ActualLVNameOnTheNode, "vgName", lvg.Spec.ActualVGNameOnTheNode) err = r.removeLLVFinalizersIfExist(ctx, llv) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVDeleteFunc] unable to remove finalizers from the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to remove finalizers from the LVMLogicalVolume") return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVDeleteFunc] successfully ended reconciliation for the LVMLogicalVolume %s", llv.Name)) + log.Info("successfully ended reconciliation for the LVMLogicalVolume") return false, nil } @@ -554,21 +593,22 @@ func (r *Reconciler) removeLLVFinalizersIfExist( ctx context.Context, llv *v1alpha1.LVMLogicalVolume, ) error { + log := r.log.WithName("removeLLVFinalizersIfExist").WithValues("llvName", llv.Name) var removed bool for i, f := range llv.Finalizers { if f == internal.SdsNodeConfiguratorFinalizer { llv.Finalizers = append(llv.Finalizers[:i], llv.Finalizers[i+1:]...) 
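// The matching finalizer entry is spliced out of the slice in place; the removed flag set
// just below gates the spec update further down, so the API call only happens when
// internal.SdsNodeConfiguratorFinalizer was actually present on the resource.
// Illustrative sketch only (assumes Go 1.21+, not part of this patch): the same removal
// could be written with the standard library's slices.DeleteFunc, which this file already uses:
//
//	before := len(llv.Finalizers)
//	llv.Finalizers = slices.DeleteFunc(llv.Finalizers, func(f string) bool {
//		return f == internal.SdsNodeConfiguratorFinalizer
//	})
//	removed := len(llv.Finalizers) != before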
removed = true - r.log.Debug(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + log.Debug("removed finalizer from the LVMLogicalVolume", "finalizer", internal.SdsNodeConfiguratorFinalizer) break } } if removed { - r.log.Trace(fmt.Sprintf("[removeLLVFinalizersIfExist] removed finalizer %s from the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + log.Trace("removed finalizer from the LVMLogicalVolume", "finalizer", internal.SdsNodeConfiguratorFinalizer) err := r.updateLVMLogicalVolumeSpec(ctx, llv) if err != nil { - r.log.Error(err, fmt.Sprintf("[updateLVMLogicalVolumeSpec] unable to update the LVMVolumeGroup %s", llv.Name)) + log.Error(err, "unable to update the LVMVolumeGroup") return err } } @@ -594,15 +634,16 @@ func checkIfLVBelongsToLLV(llv *v1alpha1.LVMLogicalVolume, lv *internal.LVData) } func (r *Reconciler) deleteLVIfNeeded(ctx context.Context, vgName string, llv *v1alpha1.LVMLogicalVolume) (bool, error) { + log := r.log.WithName("deleteLVIfNeeded").WithValues("llvName", llv.Name) lv := r.sdsCache.FindLV(vgName, llv.Spec.ActualLVNameOnTheNode) if lv == nil || !lv.Exist { - r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find LV %s in VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) + log.Warning("did not find LV in VG", "lvName", llv.Spec.ActualLVNameOnTheNode, "vgName", vgName) return false, nil } // this case prevents unexpected same-name LV deletions which does not actually belong to our LLV if !checkIfLVBelongsToLLV(llv, &lv.Data) { - r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] no need to delete LV %s as it doesn't belong to LVMLogicalVolume %s", lv.Data.LVName, llv.Name)) + log.Warning("no need to delete LV as it doesn't belong to LVMLogicalVolume", "lvName", lv.Data.LVName) return false, nil } @@ -613,13 +654,13 @@ func (r *Reconciler) deleteLVIfNeeded(ctx context.Context, vgName string, llv *v } cmd, err := r.commands.RemoveLV(vgName, llv.Spec.ActualLVNameOnTheNode) - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] runs cmd: %s", cmd)) + log.Debug("runs cmd", "cmd", cmd) if err != nil { - r.log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to remove LV %s from VG %s", llv.Spec.ActualLVNameOnTheNode, vgName)) + log.Error(err, "unable to remove LV from VG", "lvName", llv.Spec.ActualLVNameOnTheNode, "vgName", vgName) return true, err } - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] mark LV %s in the cache as removed", lv.Data.LVName)) + log.Debug("mark LV in the cache as removed", "lvName", lv.Data.LVName) r.sdsCache.MarkLVAsRemoved(lv.Data.VGName, lv.Data.LVName) return false, nil @@ -637,13 +678,14 @@ func (r *Reconciler) getLVActualSize(vgName, lvName string) resource.Quantity { } func (r *Reconciler) addLLVFinalizerIfNotExist(ctx context.Context, llv *v1alpha1.LVMLogicalVolume) (bool, error) { + log := r.log.WithName("addLLVFinalizerIfNotExist").WithValues("llvName", llv.Name) if slices.Contains(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer) { return false, nil } llv.Finalizers = append(llv.Finalizers, internal.SdsNodeConfiguratorFinalizer) - r.log.Trace(fmt.Sprintf("[addLLVFinalizerIfNotExist] added finalizer %s to the LVMLogicalVolume %s", internal.SdsNodeConfiguratorFinalizer, llv.Name)) + log.Trace("added finalizer to the LVMLogicalVolume", "finalizer", internal.SdsNodeConfiguratorFinalizer) err := r.updateLVMLogicalVolumeSpec(ctx, llv) if err != nil { return false, err diff --git a/images/agent/internal/controller/llv/reconciler_ee.go 
b/images/agent/internal/controller/llv/reconciler_ee.go index dc09a6d86..f2c12b54b 100644 --- a/images/agent/internal/controller/llv/reconciler_ee.go +++ b/images/agent/internal/controller/llv/reconciler_ee.go @@ -19,34 +19,38 @@ import ( ) func (r *Reconciler) cleanupVolume(ctx context.Context, llv *v1alpha1.LVMLogicalVolume, lv *cache.LVData, vgName string, cleanupMethod string) (shouldRequeue bool, err error) { + log := r.log.WithName("cleanupVolume").WithValues( + "vgName", vgName, + "method", cleanupMethod) if !feature.VolumeCleanupEnabled() { return false, fmt.Errorf("volume cleanup is not supported in your edition") } if cleanupMethod == v1alpha1.VolumeCleanupDiscard && lv.Data.PoolName != "" { err := errors.New("Discard cleanup method is disabled for thin volumes") - r.log.Error(err, "[deleteLVIfNeeded] Discard cleanup method is not allowed for thin volumes") + log.Error(err, "Discard cleanup method is not allowed for thin volumes") return false, err } lvName := llv.Spec.ActualLVNameOnTheNode + log = log.WithValues("lvName", lvName) started, prevFailedMethod := r.startCleanupRunning(vgName, lvName) if !started { - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] cleanup already running for LV %s in VG %s", lvName, vgName)) + log.Debug("cleanup already running for LV in VG") return false, errAlreadyRunning } - r.log.Trace(fmt.Sprintf("[deleteLVIfNeeded] starting cleaning up for LV %s in VG %s with method %s", lvName, vgName, cleanupMethod)) + log.Trace("starting cleaning up for LV in VG with method") defer func() { - r.log.Trace(fmt.Sprintf("[deleteLVIfNeeded] stopping cleaning up for LV %s in VG %s with method %s", lvName, vgName, cleanupMethod)) + log.Trace("stopping cleaning up for LV in VG with method") err := r.stopCleanupRunning(vgName, lvName, prevFailedMethod) if err != nil { - r.log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] can't unregister running cleanup for LV %s in VG %s", lvName, vgName)) + log.Error(err, "can't unregister running cleanup for LV in VG") } }() // prevent doing cleanup with previously failed method if prevFailedMethod != nil && *prevFailedMethod == cleanupMethod { - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] was already failed with method %s for LV %s in VG %s", *prevFailedMethod, lvName, vgName)) + log.Debug("was already failed with method for LV in VG", "failedMethod", *prevFailedMethod) return false, errCleanupSameAsPreviouslyFailed } @@ -56,14 +60,14 @@ func (r *Reconciler) cleanupVolume(ctx context.Context, llv *v1alpha1.LVMLogical v1alpha1.PhaseCleaning, fmt.Sprintf("Cleaning up volume %s in %s group using %s", lvName, vgName, cleanupMethod), ); err != nil { - r.log.Error(err, "[deleteLVIfNeeded] changing phase to Cleaning") + log.Error(err, "changing phase to Cleaning") return true, fmt.Errorf("changing phase to Cleaning :%w", err) } prevFailedMethod = &cleanupMethod - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] running cleanup for LV %s in VG %s with method %s", lvName, vgName, cleanupMethod)) + log.Debug("running cleanup for LV in VG with method") if shouldRetry, err := utils.VolumeCleanup(ctx, r.log, r.sdsCache, lv, cleanupMethod); err != nil { - r.log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to clean up LV %s in VG %s with method %s", lvName, vgName, cleanupMethod)) + log.Error(err, "unable to clean up LV in VG with method") if shouldRetry { prevFailedMethod = nil } diff --git a/images/agent/internal/controller/llv_extender/reconciler.go b/images/agent/internal/controller/llv_extender/reconciler.go index 25b6f9b40..42659c594 100644 --- 
a/images/agent/internal/controller/llv_extender/reconciler.go +++ b/images/agent/internal/controller/llv_extender/reconciler.go @@ -105,38 +105,42 @@ func (r *Reconciler) Reconcile( req controller.ReconcileRequest[*v1alpha1.LVMVolumeGroup], ) (controller.Result, error) { lvg := req.Object + log := r.log.WithName("Reconcile") + if lvg != nil { + log = log.WithValues("lvgName", lvg.Name) + } if !r.shouldLLVExtenderReconcileEvent(lvg) { - r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] no need to reconcile a request for the LVMVolumeGroup %s", lvg.Name)) + log.Info("no need to reconcile a request for the LVMVolumeGroup") return controller.Result{}, nil } shouldRequeue := r.ReconcileLVMLogicalVolumeExtension(ctx, lvg) if shouldRequeue { - r.log.Warning(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] Reconciler needs a retry for the LVMVolumeGroup %s. Retry in %s", lvg.Name, r.cfg.VolumeGroupScanInterval.String())) + log.Warning("Reconciler needs a retry for the LVMVolumeGroup", "retryIn", r.cfg.VolumeGroupScanInterval) return controller.Result{ RequeueAfter: r.cfg.VolumeGroupScanInterval, }, nil } - r.log.Info(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] successfully reconciled LVMLogicalVolumes for the LVMVolumeGroup %s", lvg.Name)) + log.Info("successfully reconciled LVMLogicalVolumes for the LVMVolumeGroup") return controller.Result{}, nil } func (r *Reconciler) shouldLLVExtenderReconcileEvent(newLVG *v1alpha1.LVMVolumeGroup) bool { // for new LVMVolumeGroups if reflect.DeepEqual(newLVG.Status, v1alpha1.LVMVolumeGroupStatus{}) { - r.log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status is not initialized yet", newLVG.Name)) + r.log.Debug("the LVMVolumeGroup should not be reconciled as its Status is not initialized yet", "lvgName", newLVG.Name) return false } if !utils.LVGBelongsToNode(newLVG, r.cfg.NodeName) { - r.log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as it does not belong to the node %s", newLVG.Name, r.cfg.NodeName)) + r.log.Debug("the LVMVolumeGroup should not be reconciled as it does not belong to the node", "lvgName", newLVG.Name, "nodeName", r.cfg.NodeName) return false } if newLVG.Status.Phase != internal.PhaseReady { - r.log.Debug(fmt.Sprintf("[RunLVMLogicalVolumeExtenderWatcherController] the LVMVolumeGroup %s should not be reconciled as its Status.Phase is not Ready", newLVG.Name)) + r.log.Debug("the LVMVolumeGroup should not be reconciled as its Status.Phase is not Ready", "lvgName", newLVG.Name) return false } @@ -147,83 +151,89 @@ func (r *Reconciler) ReconcileLVMLogicalVolumeExtension( ctx context.Context, lvg *v1alpha1.LVMVolumeGroup, ) bool { - r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] tries to get LLV resources with percent size for the LVMVolumeGroup %s", lvg.Name)) + log := r.log.WithName("ReconcileLVMLogicalVolumeExtension").WithValues("lvgName", lvg.Name) + log.Debug("tries to get LLV resources with percent size for the LVMVolumeGroup") llvs, err := r.getAllLLVsWithPercentSize(ctx, lvg.Name) if err != nil { - r.log.Error(err, "[ReconcileLVMLogicalVolumeExtension] unable to get LLV resources") + log.Error(err, "unable to get LLV resources") return true } - r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got LLV resources for the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully got LLV resources for the 
LVMVolumeGroup") if len(llvs) == 0 { - r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] no LVMLogicalVolumes with percent size were found for the LVMVolumeGroup %s", lvg.Name)) + log.Info("no LVMLogicalVolumes with percent size were found for the LVMVolumeGroup") return false } shouldRetry := false for _, llv := range llvs { - r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] starts to reconcile the LVMLogicalVolume %s", llv.Name)) + log := log.WithValues("llvName", llv.Name) + log.Info("starts to reconcile the LVMLogicalVolume") llvRequestedSize, err := utils.GetLLVRequestedSize(&llv, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to get requested size of the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to get requested size of the LVMLogicalVolume") shouldRetry = true continue } - r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] successfully got the requested size of the LVMLogicalVolume %s, size: %s", llv.Name, llvRequestedSize.String())) + log.Debug("successfully got the requested size of the LVMLogicalVolume", "size", llvRequestedSize) lv := r.sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if lv == nil { err = fmt.Errorf("lv %s not found", llv.Spec.ActualLVNameOnTheNode) - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to find LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + log.Error(err, "unable to find LV of the LVMLogicalVolume", "lvName", llv.Spec.ActualLVNameOnTheNode) err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, v1alpha1.PhaseFailed, err.Error()) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") } shouldRetry = true continue } if utils.AreSizesEqualWithinDelta(llvRequestedSize, lv.Data.LVSize, internal.ResizeDelta) { - r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should not be extended", llv.Name)) + log.Info("the LVMLogicalVolume should not be extended") continue } if llvRequestedSize.Value() < lv.Data.LVSize.Value() { - r.log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s requested size %s is less than actual one on the node %s", llv.Name, llvRequestedSize.String(), lv.Data.LVSize.String())) + log.Warning("the LVMLogicalVolume requested size is less than actual one on the node", + "requestedSize", llvRequestedSize, + "actualSize", lv.Data.LVSize) continue } freeSpace := utils.GetFreeLVGSpaceForLLV(lvg, &llv) if llvRequestedSize.Value()+internal.ResizeDelta.Value() > freeSpace.Value() { err = errors.New("not enough space") - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend the LV %s of the LVMLogicalVolume %s", llv.Spec.ActualLVNameOnTheNode, llv.Name)) + log.Error(err, "unable to extend the LV of the LVMLogicalVolume", "lvName", llv.Spec.ActualLVNameOnTheNode) err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, v1alpha1.PhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") shouldRetry = true } continue } - r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s should be extended from %s to %s size", 
llv.Name, llv.Status.ActualSize.String(), llvRequestedSize.String())) + log.Info("the LVMLogicalVolume should be extended", + "fromSize", llv.Status.ActualSize, + "toSize", llvRequestedSize) err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, v1alpha1.PhaseResizing, "") if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") shouldRetry = true continue } cmd, err := r.commands.ExtendLV(llvRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to extend LV %s of the LVMLogicalVolume %s, cmd: %s", llv.Spec.ActualLVNameOnTheNode, llv.Name, cmd)) + log.Error(err, "unable to extend LV of the LVMLogicalVolume", "lvName", llv.Spec.ActualLVNameOnTheNode, "cmd", cmd) err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, v1alpha1.PhaseFailed, fmt.Sprintf("unable to extend LV, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") } shouldRetry = true continue } - r.log.Info(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] the LVMLogicalVolume %s has been successfully extended", llv.Name)) + log.Info("the LVMLogicalVolume has been successfully extended") var ( maxAttempts = 5 @@ -232,22 +242,22 @@ func (r *Reconciler) ReconcileLVMLogicalVolumeExtension( for currentAttempts < maxAttempts { lv = r.sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, llv.Spec.ActualLVNameOnTheNode) if utils.AreSizesEqualWithinDelta(lv.Data.LVSize, llvRequestedSize, internal.ResizeDelta) { - r.log.Debug(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s of the LVMLogicalVolume %s was successfully updated in the cache", lv.Data.LVName, llv.Name)) + log.Debug("LV of the LVMLogicalVolume was successfully updated in the cache", "lvName", lv.Data.LVName) break } - r.log.Warning(fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] LV %s size of the LVMLogicalVolume %s was not yet updated in the cache, retry...", lv.Data.LVName, llv.Name)) + log.Warning("LV size of the LVMLogicalVolume was not yet updated in the cache, retry...", "lvName", lv.Data.LVName) currentAttempts++ time.Sleep(1 * time.Second) } if currentAttempts == maxAttempts { err = fmt.Errorf("LV %s is not updated in the cache", lv.Data.LVName) - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to resize the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to resize the LVMLogicalVolume") shouldRetry = true if err = r.llvCl.UpdatePhaseIfNeeded(ctx, &llv, v1alpha1.PhaseFailed, err.Error()); err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileLVMLogicalVolumeExtension] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") } continue } diff --git a/images/agent/internal/controller/llvs/reconciler_ee.go b/images/agent/internal/controller/llvs/reconciler_ee.go index aa0a2d0f3..e21dad0ef 100644 --- a/images/agent/internal/controller/llvs/reconciler_ee.go +++ b/images/agent/internal/controller/llvs/reconciler_ee.go @@ -95,27 +95,33 @@ func (r *Reconciler) ShouldReconcileCreate(_ *v1alpha1.LVMLogicalVolumeSnapshot) func (r *Reconciler) Reconcile(ctx context.Context, req controller.ReconcileRequest[*v1alpha1.LVMLogicalVolumeSnapshot]) (controller.Result, error) { llvs := 
req.Object + log := r.log.WithName("Reconcile") + if llvs != nil { + log = log.WithValues("llvsName", llvs.Name) + } // this case prevents the unexpected behavior when the controller runs up with existing LVMLogicalVolumeSnapshots if lvs, _ := r.sdsCache.GetLVs(); len(lvs) == 0 { - r.log.Warning(fmt.Sprintf("unable to reconcile the request as no LV was found in the cache. Retry in %s", r.cfg.LLVRequeueInterval.String())) + log.Warning("unable to reconcile the request as no LV was found in the cache", + "retryIn", r.cfg.LLVRequeueInterval) return controller.Result{RequeueAfter: r.cfg.LLVRequeueInterval}, nil } // reconcile shouldRequeue, err := r.reconcileLVMLogicalVolumeSnapshot(ctx, llvs) if err != nil { - r.log.Error(err, fmt.Sprintf("an error occurred while reconciling the LVMLogicalVolumeSnapshot: %s", llvs.Name)) + log.Error(err, "an error occurred while reconciling the LVMLogicalVolumeSnapshot") // will lead to exponential backoff return controller.Result{}, err } if shouldRequeue { - r.log.Info(fmt.Sprintf("reconciliation of LVMLogicalVolumeSnapshot %s is not finished. Requeue the request in %s", llvs.Name, r.cfg.LLVSRequeueInterval.String())) + log.Info("reconciliation of LVMLogicalVolumeSnapshot is not finished", + "requeueIn", r.cfg.LLVSRequeueInterval) // will lead to retry after fixed time return controller.Result{RequeueAfter: r.cfg.LLVSRequeueInterval}, nil } - r.log.Info(fmt.Sprintf("successfully ended reconciliation of the LVMLogicalVolumeSnapshot %s", llvs.Name)) + log.Info("successfully ended reconciliation of the LVMLogicalVolumeSnapshot") return controller.Result{}, nil } @@ -123,6 +129,7 @@ func (r *Reconciler) reconcileLVMLogicalVolumeSnapshot( ctx context.Context, llvs *v1alpha1.LVMLogicalVolumeSnapshot, ) (bool, error) { + log := r.log.WithName("reconcileLVMLogicalVolumeSnapshot").WithValues("llvsName", llvs.Name) switch { case llvs.DeletionTimestamp != nil: // delete @@ -130,9 +137,9 @@ func (r *Reconciler) reconcileLVMLogicalVolumeSnapshot( case llvs.Status == nil || llvs.Status.Phase == v1alpha1.PhasePending: return r.reconcileLLVSCreateFunc(ctx, llvs) case llvs.Status.Phase == v1alpha1.PhaseCreated: - r.log.Info(fmt.Sprintf("the LVMLogicalVolumeSnapshot %s is already Created and should not be reconciled", llvs.Name)) + log.Info("the LVMLogicalVolumeSnapshot is already Created and should not be reconciled") default: - r.log.Warning(fmt.Sprintf("skipping LLVS reconciliation, since it is in phase: %s", llvs.Status.Phase)) + log.Warning("skipping LLVS reconciliation, since it is in phase", "phase", llvs.Status.Phase) } return false, nil @@ -142,6 +149,7 @@ func (r *Reconciler) reconcileLLVSCreateFunc( ctx context.Context, llvs *v1alpha1.LVMLogicalVolumeSnapshot, ) (bool, error) { + log := r.log.WithName("reconcileLLVSCreateFunc").WithValues("llvsName", llvs.Name) // should precede setting finalizer to be able to determine the node when deleting if llvs.Status == nil { llv := &v1alpha1.LVMLogicalVolume{} @@ -155,7 +163,7 @@ func (r *Reconciler) reconcileLLVSCreateFunc( } if llv.Spec.Thin == nil { - r.log.Error(nil, fmt.Sprintf("Failed reconciling LLVS %s, LLV %s is not Thin", llvs.Name, llv.Name)) + log.Error(nil, "Failed reconciling LLVS, LLV is not Thin", "llvName", llv.Name) llvs.Status = &v1alpha1.LVMLogicalVolumeSnapshotStatus{ Phase: v1alpha1.PhaseFailed, Reason: fmt.Sprintf("Source LLV %s is not Thin", llv.Name), @@ -174,7 +182,7 @@ func (r *Reconciler) reconcileLLVSCreateFunc( } if lvg.Spec.Local.NodeName != r.cfg.NodeName { - r.log.Info(fmt.Sprintf("LLVS %s 
is from node %s. Current node %s", llvs.Name, lvg.Spec.Local.NodeName, r.cfg.NodeName)) + log.Info("LLVS is from node. Current node", "nodeName", lvg.Spec.Local.NodeName, "currentNode", r.cfg.NodeName) return false, nil } @@ -182,7 +190,7 @@ func (r *Reconciler) reconcileLLVSCreateFunc( return tps.Name == llv.Spec.Thin.PoolName }) if thinPoolIndex < 0 { - r.log.Error(nil, fmt.Sprintf("LLVS %s thin pool %s is not found in LVG %s", llvs.Name, llv.Spec.Thin.PoolName, lvg.Name)) + log.Error(nil, "LLVS thin pool is not found in LVG", "thinPoolName", llv.Spec.Thin.PoolName, "lvgName", lvg.Name) llvs.Status = &v1alpha1.LVMLogicalVolumeSnapshotStatus{ Phase: v1alpha1.PhasePending, Reason: fmt.Sprintf("Thin pool %s is not found in LVG %s", llv.Spec.Thin.PoolName, lvg.Name), @@ -191,7 +199,7 @@ func (r *Reconciler) reconcileLLVSCreateFunc( } if llv.Status == nil || llv.Status.ActualSize.Value() == 0 { - r.log.Error(nil, fmt.Sprintf("Error reconciling LLVS %s, source LLV %s ActualSize is not known", llvs.Name, llv.Name)) + log.Error(nil, "Error reconciling LLVS, source LLV ActualSize is not known", "llvName", llv.Name) llvs.Status = &v1alpha1.LVMLogicalVolumeSnapshotStatus{ Phase: v1alpha1.PhasePending, Reason: fmt.Sprintf("Source LLV %s ActualSize is not known", llv.Name), @@ -200,13 +208,10 @@ func (r *Reconciler) reconcileLLVSCreateFunc( } if lvg.Status.ThinPools[thinPoolIndex].AvailableSpace.Value() < llv.Status.ActualSize.Value() { - r.log.Error(nil, fmt.Sprintf( - "LLVS %s: not enough space available in thin pool %s: need at least %s, got %s", - llvs.Name, - llv.Spec.Thin.PoolName, - llv.Status.ActualSize.String(), - lvg.Status.ThinPools[thinPoolIndex].AvailableSpace.String(), - )) + log.Error(nil, "LLVS: not enough space available in thin pool", + "thinPoolName", llv.Spec.Thin.PoolName, + "needed", llv.Status.ActualSize.String(), + "available", lvg.Status.ThinPools[thinPoolIndex].AvailableSpace.String()) llvs.Status = &v1alpha1.LVMLogicalVolumeSnapshotStatus{ Phase: v1alpha1.PhasePending, Reason: fmt.Sprintf( @@ -228,23 +233,23 @@ func (r *Reconciler) reconcileLLVSCreateFunc( } if err := r.cl.Status().Update(ctx, llvs); err != nil { - r.log.Error(err, "Failed updating status of "+llvs.Name) + log.Error(err, "Failed updating status of LLVS") return true, err } } // check node if llvs.Status.NodeName != r.cfg.NodeName { - r.log.Info(fmt.Sprintf("LLVS %s has a Status with different node %s", llvs.Name, llvs.Status.NodeName)) + log.Info("LLVS has a Status with different node", "statusNodeName", llvs.Status.NodeName) return false, nil } // this block should precede any side-effects, which should be reverted during delete if !slices.Contains(llvs.Finalizers, internal.SdsNodeConfiguratorFinalizer) { llvs.Finalizers = append(llvs.Finalizers, internal.SdsNodeConfiguratorFinalizer) - r.log.Info("adding finalizer to LLVS " + llvs.Name) + log.Info("adding finalizer to LLVS") if err := r.cl.Update(ctx, llvs); err != nil { - r.log.Error(err, "Failed adding finalizer to LLVS "+llvs.Name) + log.Error(err, "Failed adding finalizer to LLVS") return true, err } } @@ -260,29 +265,20 @@ func (r *Reconciler) reconcileLLVSCreateFunc( llvs.Status.ActualLVNameOnTheNode, utils.NewEnabledTags(v1alpha1.LLVSNameTag, llvs.Name), ) - r.log.Debug(fmt.Sprintf("[reconcileLLVSCreateFunc] ran cmd: %s", cmd)) + log.Debug("ran cmd", "cmd", cmd) if err != nil { - r.log.Error( - err, - fmt.Sprintf( - "[reconcileLLVSCreateFunc] unable to create a LVMLogicalVolumeSnapshot %s from %s/%s", - llvs.ActualSnapshotNameOnTheNode(), - 
llvs.Status.ActualVGNameOnTheNode, - llvs.Status.ActualLVNameOnTheNode, - )) + log.Error(err, "unable to create a LVMLogicalVolumeSnapshot", + "snapshotName", llvs.ActualSnapshotNameOnTheNode(), + "vgName", llvs.Status.ActualVGNameOnTheNode, + "lvName", llvs.Status.ActualLVNameOnTheNode) llvs.Status.Reason = fmt.Sprintf("Error during snapshot creation (will be retried): %v", err) updateErr := r.cl.Status().Update(ctx, llvs) err = errors.Join(err, updateErr) return true, err } - r.log.Info( - fmt.Sprintf( - "[reconcileLLVSCreateFunc] successfully created LV %s in VG %s for LVMLogicalVolumeSnapshot resource with name: %s", - llvs.ActualSnapshotNameOnTheNode(), - llvs.Status.ActualVGNameOnTheNode, - llvs.Name, - ), - ) + log.Info("successfully created LV in VG for LVMLogicalVolumeSnapshot resource", + "lvName", llvs.ActualSnapshotNameOnTheNode(), + "vgName", llvs.Status.ActualVGNameOnTheNode) r.sdsCache.AddLV(llvs.Status.ActualVGNameOnTheNode, llvs.ActualSnapshotNameOnTheNode()) llvs.Status.Reason = "Waiting for created volume to become discovered" @@ -290,16 +286,16 @@ func (r *Reconciler) reconcileLLVSCreateFunc( return true, err case reflect.ValueOf(snapshotLVData.Data).IsZero(): // still "Waiting for created volume to become discovered" - r.log.Info("[reconcileLLVSCreateFunc] waiting for created volume to become discovered") + log.Info("waiting for created volume to become discovered") return true, nil default: - r.log.Info("[reconcileLLVSCreateFunc] updating LLVS size") + log.Info("updating LLVS size") // update size & phase size := resource.NewQuantity(snapshotLVData.Data.LVSize.Value(), resource.BinarySI) usedSize, err := snapshotLVData.Data.GetUsedSize() if err != nil { - r.log.Error(err, "error parsing LV size") + log.Error(err, "error parsing LV size") return true, err } @@ -316,6 +312,7 @@ func (r *Reconciler) reconcileLLVSDeleteFunc( ctx context.Context, llvs *v1alpha1.LVMLogicalVolumeSnapshot, ) (bool, error) { + log := r.log.WithName("reconcileLLVSDeleteFunc").WithValues("llvsName", llvs.Name) if len(llvs.Finalizers) == 0 { // means that we've deleted everything already (see below) return false, nil @@ -323,52 +320,53 @@ func (r *Reconciler) reconcileLLVSDeleteFunc( if len(llvs.Finalizers) > 1 || llvs.Finalizers[0] != internal.SdsNodeConfiguratorFinalizer { // postpone deletion until another finalizer gets removed - r.log.Warning(fmt.Sprintf("[reconcileLLVSDeleteFunc] unable to delete LVMLogicalVolumeSnapshot %s for now due to it has any other finalizer", llvs.Name)) + log.Warning("unable to delete LVMLogicalVolumeSnapshot for now due to it has any other finalizer") return false, nil } err := r.deleteLVIfNeeded(llvs.Name, llvs.ActualSnapshotNameOnTheNode(), llvs.Status.ActualVGNameOnTheNode) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLLVSDeleteFunc] unable to delete the LV %s in VG %s", llvs.ActualSnapshotNameOnTheNode(), llvs.Status.ActualVGNameOnTheNode)) + log.Error(err, "unable to delete the LV in VG", "lvName", llvs.ActualSnapshotNameOnTheNode(), "vgName", llvs.Status.ActualVGNameOnTheNode) return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVSDeleteFunc] successfully deleted the LV %s in VG %s", llvs.ActualSnapshotNameOnTheNode(), llvs.Status.ActualVGNameOnTheNode)) + log.Info("successfully deleted the LV in VG", "lvName", llvs.ActualSnapshotNameOnTheNode(), "vgName", llvs.Status.ActualVGNameOnTheNode) // at this point we have exactly 1 finalizer llvs.Finalizers = nil if err := r.cl.Update(ctx, llvs); err != nil { - r.log.Error(err, 
fmt.Sprintf("[reconcileLLVSDeleteFunc] unable to remove finalizers from the LVMLogicalVolumeSnapshot %s", llvs.Name)) + log.Error(err, "unable to remove finalizers from the LVMLogicalVolumeSnapshot") return true, err } - r.log.Info(fmt.Sprintf("[reconcileLLVSDeleteFunc] successfully ended deletion of LVMLogicalVolumeSnapshot %s", llvs.Name)) + log.Info("successfully ended deletion of LVMLogicalVolumeSnapshot") return false, nil } func (r *Reconciler) deleteLVIfNeeded(llvsName, llvsActualNameOnTheNode, vgActualNameOnTheNode string) error { + log := r.log.WithName("deleteLVIfNeeded").WithValues("lvName", llvsActualNameOnTheNode, "vgName", vgActualNameOnTheNode) lv := r.sdsCache.FindLV(vgActualNameOnTheNode, llvsActualNameOnTheNode) if lv == nil || !lv.Exist { - r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find LV %s in VG %s", llvsActualNameOnTheNode, vgActualNameOnTheNode)) + log.Warning("did not find LV in VG") return nil } if ok, name := utils.ReadValueFromTags(lv.Data.LvTags, v1alpha1.LLVSNameTag); !ok { - r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] did not find required tags on LV %s in VG %s", llvsActualNameOnTheNode, vgActualNameOnTheNode)) + log.Warning("did not find required tags on LV in VG") return nil } else if name != llvsName { - r.log.Warning(fmt.Sprintf("[deleteLVIfNeeded] name in tag doesn't match %s on LV %s in VG %s", llvsName, llvsActualNameOnTheNode, vgActualNameOnTheNode)) + log.Warning("name in tag doesn't match on LV in VG", "expectedName", llvsName) return nil } cmd, err := r.commands.RemoveLV(vgActualNameOnTheNode, llvsActualNameOnTheNode) - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] runs cmd: %s", cmd)) + log.Debug("runs cmd", "cmd", cmd) if err != nil { - r.log.Error(err, fmt.Sprintf("[deleteLVIfNeeded] unable to remove LV %s from VG %s", llvsActualNameOnTheNode, vgActualNameOnTheNode)) + log.Error(err, "unable to remove LV from VG") return err } - r.log.Debug(fmt.Sprintf("[deleteLVIfNeeded] mark LV %s in the cache as removed", lv.Data.LVName)) + log.Debug("mark LV in the cache as removed", "lvName", lv.Data.LVName) r.sdsCache.MarkLVAsRemoved(lv.Data.VGName, lv.Data.LVName) return nil diff --git a/images/agent/internal/controller/lvg/discoverer.go b/images/agent/internal/controller/lvg/discoverer.go index 8cc75f78b..da3fc839d 100644 --- a/images/agent/internal/controller/lvg/discoverer.go +++ b/images/agent/internal/controller/lvg/discoverer.go @@ -81,135 +81,152 @@ func (d *Discoverer) Name() string { } func (d *Discoverer) Discover(ctx context.Context) (controller.Result, error) { - d.log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler starts LVMVolumeGroup resources reconciliation") + log := d.log.WithName("Discover") + log.Info("Reconciler starts LVMVolumeGroup resources reconciliation") shouldRequeue := d.LVMVolumeGroupDiscoverReconcile(ctx) if shouldRequeue { - d.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error occurred while run the Reconciler func, retry in %s", d.cfg.VolumeGroupScanInterval.String())) + log.Warning("an error occurred while run the Reconciler func", + "retryIn", d.cfg.VolumeGroupScanInterval) return controller.Result{ RequeueAfter: d.cfg.VolumeGroupScanInterval, }, nil } - d.log.Info("[RunLVMVolumeGroupDiscoverController] Reconciler successfully ended LVMVolumeGroup resources reconciliation") + log.Info("Reconciler successfully ended LVMVolumeGroup resources reconciliation") return controller.Result{}, nil } func (d *Discoverer) LVMVolumeGroupDiscoverReconcile(ctx context.Context) bool { + log := 
d.log.WithName("LVMVolumeGroupDiscoverReconcile") reconcileStart := time.Now() - d.log.Info("[RunLVMVolumeGroupDiscoverController] starts the reconciliation") + log.Info("starts the reconciliation") currentLVMVGs, err := d.GetAPILVMVolumeGroups(ctx) if err != nil { - d.log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to run GetAPILVMVolumeGroups") + log.Error(err, "unable to run GetAPILVMVolumeGroups") return true } if len(currentLVMVGs) == 0 { - d.log.Debug("[RunLVMVolumeGroupDiscoverController] no current LVMVolumeGroups found") + log.Debug("no current LVMVolumeGroups found") } blockDevices, err := d.bdCl.GetAPIBlockDevices(ctx, DiscovererName, nil) if err != nil { - d.log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to GetAPIBlockDevices") + log.Error(err, "unable to GetAPIBlockDevices") for _, lvg := range currentLVMVGs { err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionFalse, internal.TypeVGReady, "NoBlockDevices", fmt.Sprintf("unable to get block devices resources, err: %s", err.Error())) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGReady, + "lvgName", lvg.Name) } } return true } if len(blockDevices) == 0 { - d.log.Info("[RunLVMVolumeGroupDiscoverController] no BlockDevices were found") + log.Info("no BlockDevices were found") return false } filteredLVGs := filterLVGsByNode(currentLVMVGs, d.cfg.NodeName) - d.log.Debug("[RunLVMVolumeGroupDiscoverController] tries to get LVMVolumeGroup candidates") + log.Debug("tries to get LVMVolumeGroup candidates") candidates, err := d.GetLVMVolumeGroupCandidates(blockDevices) if err != nil { - d.log.Error(err, "[RunLVMVolumeGroupDiscoverController] unable to run GetLVMVolumeGroupCandidates") + log.Error(err, "unable to run GetLVMVolumeGroupCandidates") for _, lvg := range filteredLVGs { - d.log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] turn LVMVolumeGroup %s to non operational. LVG struct: %+v ", lvg.Name, lvg)) + log.Trace("turn LVMVolumeGroup to non operational", "lvgName", lvg.Name, "lvg", lvg) err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionFalse, internal.TypeVGReady, "DataConfigurationFailed", fmt.Sprintf("unable to configure data, err: %s", err.Error())) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGReady, "lvgName", lvg.Name) } } return true } - d.log.Debug("[RunLVMVolumeGroupDiscoverController] successfully got LVMVolumeGroup candidates") + log.Debug("successfully got LVMVolumeGroup candidates") if len(candidates) == 0 { - d.log.Debug("[RunLVMVolumeGroupDiscoverController] no candidates were found on the node") + log.Debug("no candidates were found on the node") } candidates, err = d.ReconcileUnhealthyLVMVolumeGroups(ctx, candidates, filteredLVGs) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] an error has occurred while clearing the LVMVolumeGroups resources. 
Requeue the request in %s", d.cfg.VolumeGroupScanInterval.String())) + log.Error(err, "an error has occurred while clearing the LVMVolumeGroups resources", + "requeueIn", d.cfg.VolumeGroupScanInterval) return true } shouldRequeue := false for _, candidate := range candidates { if lvg, exist := filteredLVGs[candidate.ActualVGNameOnTheNode]; exist { - d.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is already exist. Tries to update it", lvg.Name)) - d.log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] candidate: %+v", candidate)) - d.log.Trace(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] lvg: %+v", lvg)) + log := log.WithValues("lvgName", lvg.Name) + log.Debug("the LVMVolumeGroup is already exist. Tries to update it") + log.Trace("candidate and lvg", "candidate", candidate, "lvg", lvg) - if !hasLVMVolumeGroupDiff(d.log, lvg, candidate) { - d.log.Debug(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] no data to update for LVMVolumeGroup, name: "%s"`, lvg.Name)) + if !hasLVMVolumeGroupDiff(log, lvg, candidate) { + log.Debug("no data to update for LVMVolumeGroup") err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGReady) shouldRequeue = true } continue } - d.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s should be updated", lvg.Name)) + log.Debug("the LVMVolumeGroup should be updated") if err = d.UpdateLVMVolumeGroupByCandidate(ctx, &lvg, candidate); err != nil { - d.log.Error(err, fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] unable to update LVMVolumeGroup, name: "%s". Requeue the request in %s`, - lvg.Name, d.cfg.VolumeGroupScanInterval.String())) + log.Error(err, "unable to update LVMVolumeGroup", + "requeueIn", d.cfg.VolumeGroupScanInterval) shouldRequeue = true continue } - d.log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] updated LVMVolumeGroup, name: "%s"`, lvg.Name)) + log.Info("updated LVMVolumeGroup") } else { - d.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] the LVMVolumeGroup %s is not yet created. Create it", candidate.LVMVGName)) + log.Debug("the LVMVolumeGroup is not yet created. Create it", + "candidateName", candidate.LVMVGName) createdLvg, err := d.CreateLVMVolumeGroupByCandidate(ctx, candidate) if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to CreateLVMVolumeGroupByCandidate %s. 
Requeue the request in %s", candidate.LVMVGName, d.cfg.VolumeGroupScanInterval.String())) + log.Error(err, "unable to CreateLVMVolumeGroupByCandidate", + "candidateName", candidate.LVMVGName, + "requeueIn", d.cfg.VolumeGroupScanInterval) shouldRequeue = true continue } + log := log.WithValues("lvgName", createdLvg.Name) + err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionTrue, internal.TypeVGConfigurationApplied, internal.ReasonApplied, "all configuration has been applied") if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, createdLvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "lvgName", createdLvg.Name) shouldRequeue = true continue } err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { - d.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, createdLvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGReady) shouldRequeue = true continue } - d.log.Info(fmt.Sprintf(`[RunLVMVolumeGroupDiscoverController] created new APILVMVolumeGroup, name: "%s"`, createdLvg.Name)) + log.Info("created new APILVMVolumeGroup") } } if shouldRequeue { - d.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupDiscoverController] some problems have been occurred while iterating the lvmvolumegroup resources. Retry the reconcile in %s", d.cfg.VolumeGroupScanInterval.String())) + log.Warning("some problems have been occurred while iterating the lvmvolumegroup resources", + "retryIn", d.cfg.VolumeGroupScanInterval.String()) return true } - d.log.Info("[RunLVMVolumeGroupDiscoverController] END discovery loop") + log.Info("END discovery loop") d.metrics.ReconcileDuration(DiscovererName).Observe(d.metrics.GetEstimatedTimeInSeconds(reconcileStart)) d.metrics.ReconcilesCountTotal(DiscovererName).Inc() return false @@ -241,6 +258,7 @@ func (d *Discoverer) ReconcileUnhealthyLVMVolumeGroups( candidates []internal.LVMVolumeGroupCandidate, lvgs map[string]v1alpha1.LVMVolumeGroup, ) ([]internal.LVMVolumeGroupCandidate, error) { + log := d.log.WithName("ReconcileUnhealthyLVMVolumeGroups") candidateMap := make(map[string]internal.LVMVolumeGroupCandidate, len(candidates)) for _, candidate := range candidates { candidateMap[candidate.ActualVGNameOnTheNode] = candidate @@ -249,12 +267,13 @@ func (d *Discoverer) ReconcileUnhealthyLVMVolumeGroups( var err error for _, lvg := range lvgs { + log := log.WithValues("lvgName", lvg.Name, "vgName", lvg.Spec.ActualVGNameOnTheNode) // this means VG was actually created on the node before if len(lvg.Status.VGUuid) > 0 { messageBldr := strings.Builder{} candidate, exist := candidateMap[lvg.Spec.ActualVGNameOnTheNode] if !exist { - d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its VG %s", lvg.Name, lvg.Spec.ActualVGNameOnTheNode)) + log.Warning("the LVMVolumeGroup misses its VG") messageBldr.WriteString(fmt.Sprintf("Unable to find VG %s (it should be created with special tag %s). 
", lvg.Spec.ActualVGNameOnTheNode, internal.LVMTags[0])) } else { // candidate exists, check thin pools @@ -265,15 +284,18 @@ func (d *Discoverer) ReconcileUnhealthyLVMVolumeGroups( // take thin-pools from status instead of spec to prevent miss never-created ones for i, statusTp := range lvg.Status.ThinPools { + log := log.WithValues("thinPoolName", statusTp.Name) if candidateTp, exist := candidateTPs[statusTp.Name]; !exist { - d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s misses its ThinPool %s", lvg.Name, statusTp.Name)) + log.Warning("the LVMVolumeGroup misses its ThinPool") messageBldr.WriteString(fmt.Sprintf("Unable to find ThinPool %s. ", statusTp.Name)) lvg.Status.ThinPools[i].Ready = false } else if !utils.AreSizesEqualWithinDelta(candidate.VGSize, statusTp.ActualSize, internal.ResizeDelta) && candidateTp.ActualSize.Value()+internal.ResizeDelta.Value() < statusTp.ActualSize.Value() { // that means thin-pool is not 100%VG space // use candidate VGSize as lvg.Status.VGSize might not be updated yet - d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s ThinPool %s size %s is less than status one %s", lvg.Name, statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) + log.Warning("the LVMVolumeGroup ThinPool size is less than status one", + "candidateSize", candidateTp.ActualSize, + "statusSize", statusTp.ActualSize) messageBldr.WriteString(fmt.Sprintf("ThinPool %s on the node has size %s which is less than status one %s. ", statusTp.Name, candidateTp.ActualSize.String(), statusTp.ActualSize.String())) } } @@ -282,26 +304,27 @@ func (d *Discoverer) ReconcileUnhealthyLVMVolumeGroups( if messageBldr.Len() > 0 { err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, &lvg, metav1.ConditionFalse, internal.TypeVGReady, internal.ReasonScanFailed, messageBldr.String()) if err != nil { - d.log.Error(err, fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] unable to update the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup") return nil, err } - d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] the LVMVolumeGroup %s and its data object will be removed from the reconcile due to unhealthy states", lvg.Name)) + log.Warning("the LVMVolumeGroup and its data object will be removed from the reconcile due to unhealthy states") vgNamesToSkip[candidate.ActualVGNameOnTheNode] = struct{}{} } } } for _, lvg := range lvgs { + log := log.WithValues("lvgName", lvg.Name) if _, shouldSkip := vgNamesToSkip[lvg.Spec.ActualVGNameOnTheNode]; shouldSkip { - d.log.Warning(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] remove the LVMVolumeGroup %s from the reconcile", lvg.Name)) + log.Warning("remove the LVMVolumeGroup from the reconcile") delete(lvgs, lvg.Spec.ActualVGNameOnTheNode) } } for i, c := range candidates { if _, shouldSkip := vgNamesToSkip[c.ActualVGNameOnTheNode]; shouldSkip { - d.log.Debug(fmt.Sprintf("[ReconcileUnhealthyLVMVolumeGroups] remove the data object for VG %s from the reconcile", c.ActualVGNameOnTheNode)) + log.Debug("remove the data object for VG from the reconcile", "vgName", c.ActualVGNameOnTheNode) candidates = append(candidates[:i], candidates[i+1:]...) 
} } @@ -310,6 +333,7 @@ func (d *Discoverer) ReconcileUnhealthyLVMVolumeGroups( } func (d *Discoverer) GetLVMVolumeGroupCandidates(bds map[string]v1alpha1.BlockDevice) ([]internal.LVMVolumeGroupCandidate, error) { + log := d.log.WithName("GetLVMVolumeGroupCandidates") vgs, vgErrs := d.sdsCache.GetVGs() vgWithTag := filterVGByTag(vgs, internal.LVMTags) candidates := make([]internal.LVMVolumeGroupCandidate, 0, len(vgWithTag)) @@ -322,22 +346,22 @@ func (d *Discoverer) GetLVMVolumeGroupCandidates(bds map[string]v1alpha1.BlockDe // If vgErrs is not empty, that means we have some problems on vgs, so we need to identify unhealthy vgs. var vgIssues map[string]string if vgErrs.Len() != 0 { - d.log.Warning("[GetLVMVolumeGroupCandidates] some errors have been occurred while executing vgs command") - vgIssues = d.sortVGIssuesByVG(d.log, vgWithTag) + log.Warning("some errors have been occurred while executing vgs command") + vgIssues = d.sortVGIssuesByVG(log, vgWithTag) } pvs, pvErrs := d.sdsCache.GetPVs() if len(pvs) == 0 { err := errors.New("no PV found") - d.log.Error(err, "[GetLVMVolumeGroupCandidates] no PV was found, but VG with tags are not empty") + log.Error(err, "no PV was found, but VG with tags are not empty") return nil, err } // If pvErrs is not empty, that means we have some problems on vgs, so we need to identify unhealthy vgs. var pvIssues map[string][]string if pvErrs.Len() != 0 { - d.log.Warning("[GetLVMVolumeGroupCandidates] some errors have been occurred while executing pvs command") - pvIssues = d.sortPVIssuesByVG(d.log, pvs) + log.Warning("some errors have been occurred while executing pvs command") + pvIssues = d.sortPVIssuesByVG(log, pvs) } lvs, lvErrs := d.sdsCache.GetLVs() @@ -350,15 +374,15 @@ func (d *Discoverer) GetLVMVolumeGroupCandidates(bds map[string]v1alpha1.BlockDe // If lvErrs is not empty, that means we have some problems on vgs, so we need to identify unhealthy vgs. var lvIssues map[string]map[string]string if lvErrs.Len() != 0 { - d.log.Warning("[GetLVMVolumeGroupCandidates] some errors have been occurred while executing lvs command") - lvIssues = d.sortThinPoolIssuesByVG(d.log, thinPools) + log.Warning("some errors have been occurred while executing lvs command") + lvIssues = d.sortThinPoolIssuesByVG(log, thinPools) } // Sort PV,BlockDevices and LV by VG to fill needed information for LVMVolumeGroup resource further. sortedPVs := sortPVsByVG(pvs, vgWithTag) sortedBDs := sortBlockDevicesByVG(bds, vgWithTag) - d.log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] BlockDevices: %+v", bds)) - d.log.Trace(fmt.Sprintf("[GetLVMVolumeGroupCandidates] Sorted BlockDevices: %+v", sortedBDs)) + log.Trace("BlockDevices", "blockDevices", bds) + log.Trace("Sorted BlockDevices", "sortedBlockDevices", sortedBDs) sortedThinPools := sortThinPoolsByVG(thinPools, vgWithTag) sortedLVByThinPool := sortLVByThinPool(lvs) @@ -448,12 +472,14 @@ func (d *Discoverer) UpdateLVMVolumeGroupByCandidate( lvg *v1alpha1.LVMVolumeGroup, candidate internal.LVMVolumeGroupCandidate, ) error { + log := d.log.WithName("UpdateLVMVolumeGroupByCandidate").WithValues("lvgName", lvg.Name) // Check if VG has some problems if candidate.Health == internal.NonOperational { - d.log.Warning(fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] candidate for LVMVolumeGroup %s has NonOperational health, message %s. Update the VGReady condition to False", lvg.Name, candidate.Message)) + log := log.WithValues("message", candidate.Message) + log.Warning("candidate for LVMVolumeGroup has NonOperational health. 
Update the VGReady condition to False") updErr := d.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, metav1.ConditionFalse, internal.TypeVGReady, internal.ReasonScanFailed, candidate.Message) if updErr != nil { - d.log.Error(updErr, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + log.Error(updErr, "unable to add a condition to the LVMVolumeGroup", "conditionType", internal.TypeVGReady) } return updErr } @@ -480,7 +506,7 @@ func (d *Discoverer) UpdateLVMVolumeGroupByCandidate( } thinPools, err := convertStatusThinPools(*lvg, candidate.StatusThinPools) if err != nil { - d.log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to convert status thin pools for the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to convert status thin pools for the LVMVolumeGroup") return err } @@ -502,13 +528,14 @@ func (d *Discoverer) UpdateLVMVolumeGroupByCandidate( err = d.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, metav1.ConditionTrue, internal.TypeVGReady, internal.ReasonUpdated, "ready to create LV") if err != nil { - d.log.Error(err, fmt.Sprintf("[UpdateLVMVolumeGroupByCandidate] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGReady, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", "conditionType", internal.TypeVGReady) } return err } func (d *Discoverer) configureCandidateNodeDevices(pvs map[string][]internal.PVData, bds map[string][]v1alpha1.BlockDevice, vg internal.VGData, currentNode string) map[string][]internal.LVMVGDevice { + log := d.log.WithName("configureCandidateNodeDevices").WithValues("vgName", vg.VGName) filteredPV := pvs[vg.VGName+vg.VGUUID] filteredBds := bds[vg.VGName+vg.VGUUID] bdPathStatus := make(map[string]v1alpha1.BlockDevice, len(bds)) @@ -519,12 +546,13 @@ func (d *Discoverer) configureCandidateNodeDevices(pvs map[string][]internal.PVD } for _, pv := range filteredPV { + log := log.WithValues("pvName", pv.PVName) bd, exist := bdPathStatus[pv.PVName] // this is very rare case which might occurred while VG extend operation goes. In this case, in the cache the controller // sees a new PV included in the VG, but BlockDeviceDiscover did not update the corresponding BlockDevice resource on time, // so the BlockDevice resource does not have any info, that it is in the VG. 
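// Skipping the PV at this point is safe: once BlockDeviceDiscover refreshes the corresponding
// BlockDevice resource, the device is picked up on a later discovery iteration.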
if !exist { - d.log.Warning(fmt.Sprintf("[configureCandidateNodeDevices] no BlockDevice resource is yet configured for PV %s in VG %s, retry on the next iteration", pv.PVName, vg.VGName)) + log.Warning("no BlockDevice resource is yet configured for PV in VG, retry on the next iteration") continue } @@ -585,20 +613,22 @@ func removeDuplicates(strList []string) []string { } func (d *Discoverer) sortThinPoolIssuesByVG(log logger.Logger, lvs []internal.LVData) map[string]map[string]string { + log = log.WithName("sortThinPoolIssuesByVG") var lvIssuesByVG = make(map[string]map[string]string, len(lvs)) for _, lv := range lvs { + log := log.WithValues("lvName", lv.LVName) _, cmd, stdErr, err := d.commands.GetLV(lv.VGName, lv.LVName) - log.Debug(fmt.Sprintf("[sortThinPoolIssuesByVG] runs cmd: %s", cmd)) + log.Debug("runs cmd", "cmd", cmd) if err != nil { - log.Error(err, fmt.Sprintf(`[sortThinPoolIssuesByVG] unable to run lvs command for lv, name: "%s"`, lv.LVName)) + log.Error(err, "unable to run lvs command for lv") lvIssuesByVG[lv.VGName+lv.VGUuid] = make(map[string]string, len(lvs)) lvIssuesByVG[lv.VGName+lv.VGUuid][lv.LVName] = err.Error() } if stdErr.Len() != 0 { - log.Error(errors.New(stdErr.String()), fmt.Sprintf(`[sortThinPoolIssuesByVG] lvs command for lv "%s" has stderr: `, lv.LVName)) + log.Error(errors.New(stdErr.String()), "lvs command for lv has stderr") lvIssuesByVG[lv.VGName+lv.VGUuid] = make(map[string]string, len(lvs)) lvIssuesByVG[lv.VGName+lv.VGUuid][lv.LVName] = stdErr.String() stdErr.Reset() @@ -609,19 +639,21 @@ func (d *Discoverer) sortThinPoolIssuesByVG(log logger.Logger, lvs []internal.LV } func (d *Discoverer) sortPVIssuesByVG(log logger.Logger, pvs []internal.PVData) map[string][]string { + log = log.WithName("sortPVIssuesByVG") pvIssuesByVG := make(map[string][]string, len(pvs)) for _, pv := range pvs { + log := log.WithValues("pvName", pv.PVName) _, cmd, stdErr, err := d.commands.GetPV(pv.PVName) - log.Debug(fmt.Sprintf("[sortPVIssuesByVG] runs cmd: %s", cmd)) + log.Debug("runs cmd", "cmd", cmd) if err != nil { - log.Error(err, fmt.Sprintf(`[sortPVIssuesByVG] unable to run pvs command for pv "%s"`, pv.PVName)) + log.Error(err, "unable to run pvs command for pv") pvIssuesByVG[pv.VGName+pv.VGUuid] = append(pvIssuesByVG[pv.VGName+pv.VGUuid], err.Error()) } if stdErr.Len() != 0 { - log.Error(errors.New(stdErr.String()), fmt.Sprintf(`[sortPVIssuesByVG] pvs command for pv "%s" has stderr: %s`, pv.PVName, stdErr.String())) + log.Error(errors.New(stdErr.String()), "pvs command for pv has stderr", "stderr", stdErr.String()) pvIssuesByVG[pv.VGName+pv.VGUuid] = append(pvIssuesByVG[pv.VGName+pv.VGUuid], stdErr.String()) stdErr.Reset() } @@ -631,17 +663,19 @@ func (d *Discoverer) sortPVIssuesByVG(log logger.Logger, pvs []internal.PVData) } func (d *Discoverer) sortVGIssuesByVG(log logger.Logger, vgs []internal.VGData) map[string]string { + log = log.WithName("sortVGIssuesByVG") vgIssues := make(map[string]string, len(vgs)) for _, vg := range vgs { + log := log.WithValues("vgName", vg.VGName) _, cmd, stdErr, err := d.commands.GetVG(vg.VGName) - log.Debug(fmt.Sprintf("[sortVGIssuesByVG] runs cmd: %s", cmd)) + log.Debug("runs cmd", "cmd", cmd) if err != nil { - log.Error(err, fmt.Sprintf(`[sortVGIssuesByVG] unable to run vgs command for vg, name: "%s"`, vg.VGName)) + log.Error(err, "unable to run vgs command for vg") vgIssues[vg.VGName+vg.VGUUID] = err.Error() } if stdErr.Len() != 0 { - log.Error(errors.New(stdErr.String()), fmt.Sprintf(`[sortVGIssuesByVG] vgs command for vg "%s" has 
stderr: `, vg.VGName)) + log.Error(errors.New(stdErr.String()), "vgs command for vg has stderr") vgIssues[vg.VGName+vg.VGUUID] = stdErr.String() stdErr.Reset() } @@ -739,14 +773,15 @@ func getThinPools(lvs []internal.LVData) []internal.LVData { } func getStatusThinPools(log logger.Logger, thinPools, sortedLVs map[string][]internal.LVData, vg internal.VGData, lvIssues map[string]map[string]string) []internal.LVMVGStatusThinPool { + log = log.WithName("getStatusThinPools").WithValues("vgName", vg.VGName) tps := thinPools[vg.VGName+vg.VGUUID] result := make([]internal.LVMVGStatusThinPool, 0, len(tps)) for _, thinPool := range tps { usedSize, err := thinPool.GetUsedSize() - log.Trace(fmt.Sprintf("[getStatusThinPools] LV %v for VG name %s", thinPool, vg.VGName)) + log.Trace("LV for VG name", "lv", thinPool) if err != nil { - log.Error(err, "[getStatusThinPools] unable to getThinPoolUsedSize") + log.Error(err, "unable to getThinPoolUsedSize") } allocatedSize := getThinPoolAllocatedSize(thinPool.LVName, sortedLVs[thinPool.LVName]) @@ -803,22 +838,35 @@ func filterLVGsByNode(lvgs map[string]v1alpha1.LVMVolumeGroup, currentNode strin } func hasLVMVolumeGroupDiff(log logger.Logger, lvg v1alpha1.LVMVolumeGroup, candidate internal.LVMVolumeGroupCandidate) bool { + log = log.WithName("hasLVMVolumeGroupDiff") convertedStatusPools, err := convertStatusThinPools(lvg, candidate.StatusThinPools) if err != nil { - log.Error(err, fmt.Sprintf("[hasLVMVolumeGroupDiff] unable to identify candidate difference for the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to identify candidate difference for the LVMVolumeGroup", "lvgName", lvg.Name) return false } - log.Trace(fmt.Sprintf(`AllocatedSize, candidate: %s, lvg: %s`, candidate.AllocatedSize.String(), lvg.Status.AllocatedSize.String())) - log.Trace(fmt.Sprintf(`ThinPools, candidate: %+v, lvg: %+v`, convertedStatusPools, lvg.Status.ThinPools)) + log.Trace("AllocatedSize comparison", + "candidate", candidate.AllocatedSize, + "lvg", lvg.Status.AllocatedSize) + log.Trace("ThinPools comparison", "candidate", convertedStatusPools, "lvg", lvg.Status.ThinPools) for _, tp := range convertedStatusPools { - log.Trace(fmt.Sprintf("Candidate ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String())) + log.Trace("Candidate ThinPool", + "name", tp.Name, + "actualSize", tp.ActualSize, + "usedSize", tp.UsedSize) } for _, tp := range lvg.Status.ThinPools { - log.Trace(fmt.Sprintf("Resource ThinPool name: %s, actual size: %s, used size: %s", tp.Name, tp.ActualSize.String(), tp.UsedSize.String())) - } - log.Trace(fmt.Sprintf(`VGSize, candidate: %s, lvg: %s`, candidate.VGSize.String(), lvg.Status.VGSize.String())) - log.Trace(fmt.Sprintf(`VGUUID, candidate: %s, lvg: %s`, candidate.VGUUID, lvg.Status.VGUuid)) - log.Trace(fmt.Sprintf(`Nodes, candidate: %+v, lvg: %+v`, convertLVMVGNodes(candidate.Nodes), lvg.Status.Nodes)) + log.Trace("Resource ThinPool", + "name", tp.Name, + "actualSize", tp.ActualSize, + "usedSize", tp.UsedSize) + } + log.Trace("VGSize, VGUUID, and Nodes comparison", + "candidateVGSize", candidate.VGSize, + "lvgVGSize", lvg.Status.VGSize, + "candidateVGUUID", candidate.VGUUID, + "lvgVGUUID", lvg.Status.VGUuid, + "candidateNodes", convertLVMVGNodes(candidate.Nodes), + "lvgNodes", lvg.Status.Nodes) return candidate.AllocatedSize.Value() != lvg.Status.AllocatedSize.Value() || hasStatusPoolDiff(convertedStatusPools, lvg.Status.ThinPools) || @@ -829,6 +877,7 @@ func hasLVMVolumeGroupDiff(log logger.Logger, lvg 
v1alpha1.LVMVolumeGroup, candi } func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LVMVolumeGroupNode) bool { + log = log.WithName("hasStatusNodesDiff") if len(first) != len(second) { return true } @@ -843,8 +892,14 @@ func hasStatusNodesDiff(log logger.Logger, first, second []v1alpha1.LVMVolumeGro } for j := range first[i].Devices { - log.Trace(fmt.Sprintf("[hasStatusNodesDiff] first Device: name %s, PVSize %s, DevSize %s", first[i].Devices[j].BlockDevice, first[i].Devices[j].PVSize.String(), first[i].Devices[j].DevSize.String())) - log.Trace(fmt.Sprintf("[hasStatusNodesDiff] second Device: name %s, PVSize %s, DevSize %s", second[i].Devices[j].BlockDevice, second[i].Devices[j].PVSize.String(), second[i].Devices[j].DevSize.String())) + log.Trace("first Device", + "name", first[i].Devices[j].BlockDevice, + "PVSize", first[i].Devices[j].PVSize, + "DevSize", first[i].Devices[j].DevSize) + log.Trace("second Device", + "name", second[i].Devices[j].BlockDevice, + "PVSize", second[i].Devices[j].PVSize, + "DevSize", second[i].Devices[j].DevSize) if first[i].Devices[j].BlockDevice != second[i].Devices[j].BlockDevice || first[i].Devices[j].Path != second[i].Devices[j].Path || first[i].Devices[j].PVUuid != second[i].Devices[j].PVUuid || diff --git a/images/agent/internal/controller/lvg/reconciler.go b/images/agent/internal/controller/lvg/reconciler.go index ea0b0d2c3..3e4a36c1c 100644 --- a/images/agent/internal/controller/lvg/reconciler.go +++ b/images/agent/internal/controller/lvg/reconciler.go @@ -104,28 +104,31 @@ func (r *Reconciler) ShouldReconcileCreate(_ *v1alpha1.LVMVolumeGroup) bool { // Reconcile implements controller.Reconciler. func (r *Reconciler) Reconcile(ctx context.Context, request controller.ReconcileRequest[*v1alpha1.LVMVolumeGroup]) (controller.Result, error) { - r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler starts to reconcile the request %s", request.Object.Name)) - lvg := request.Object + log := r.log.WithName("Reconcile") + if lvg != nil { + log = log.WithValues("lvgName", lvg.Name, "nodeName", r.cfg.NodeName) + } + log.Info("Reconciler starts to reconcile the request") belongs := checkIfLVGBelongsToNode(lvg, r.cfg.NodeName) if !belongs { - r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s does not belong to the node %s", lvg.Name, r.cfg.NodeName)) + log.Info("the LVMVolumeGroup does not belong to the node") return controller.Result{}, nil } - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s belongs to the node %s. Starts to reconcile", lvg.Name, r.cfg.NodeName)) + log.Debug("the LVMVolumeGroup belongs to the node. 
Starts to reconcile") - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add the finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("tries to add the finalizer to the LVMVolumeGroup", "finalizer", internal.SdsNodeConfiguratorFinalizer) added, err := r.addLVGFinalizerIfNotExist(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add the finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Error(err, "unable to add the finalizer to the LVMVolumeGroup", "finalizer", internal.SdsNodeConfiguratorFinalizer) return controller.Result{}, err } if added { - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully added a finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("successfully added a finalizer to the LVMVolumeGroup", "finalizer", internal.SdsNodeConfiguratorFinalizer) } else { - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add a finalizer %s to the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("no need to add a finalizer to the LVMVolumeGroup", "finalizer", internal.SdsNodeConfiguratorFinalizer) } // this case handles the situation when a user decides to remove LVMVolumeGroup resource without created VG @@ -135,7 +138,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile } if deleted { - r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s was deleted, stop the reconciliation", lvg.Name)) + log.Info("the LVMVolumeGroup was deleted, stop the reconciliation") return controller.Result{}, nil } @@ -143,16 +146,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile delete(lvg.Labels, internal.LVGUpdateTriggerLabel) err = r.cl.Update(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to update the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup") return controller.Result{}, err } - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the label %s from the LVMVolumeGroup %s", internal.LVGUpdateTriggerLabel, lvg.Name)) + log.Debug("successfully removed the label from the LVMVolumeGroup", "label", internal.LVGUpdateTriggerLabel) } - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to get block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector)) + log.Debug("tries to get block device resources for the LVMVolumeGroup by the selector", + "selector", lvg.Spec.BlockDeviceSelector) blockDevices, err := r.bdCl.GetAPIBlockDevices(ctx, ReconcilerName, lvg.Spec.BlockDeviceSelector) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to get BlockDevices. Retry in %s", r.cfg.BlockDeviceScanInterval.String())) + log.Error(err, "unable to get BlockDevices", "retryIn", r.cfg.BlockDeviceScanInterval) err = r.lvgCl.UpdateLVGConditionIfNeeded( ctx, lvg, @@ -162,18 +166,22 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile fmt.Sprintf("unable to get block devices resources, err: %s", err.Error()), ) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. 
Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.cfg.BlockDeviceScanInterval.String())) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "retryIn", r.cfg.BlockDeviceScanInterval) } return controller.Result{RequeueAfter: r.cfg.BlockDeviceScanInterval}, nil } - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully got block device resources for the LVMVolumeGroup %s by the selector %v", lvg.Name, lvg.Spec.BlockDeviceSelector)) + log.Debug("successfully got block device resources for the LVMVolumeGroup by the selector", + "selector", lvg.Spec.BlockDeviceSelector) blockDevices = filterBlockDevicesByNodeName(blockDevices, lvg.Spec.Local.NodeName) valid, reason := validateSpecBlockDevices(lvg, blockDevices) if !valid { - r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupController] validation failed for the LVMVolumeGroup %s, reason: %s", lvg.Name, reason)) + log.Warning("validation failed for the LVMVolumeGroup", + "reason", reason) err = r.lvgCl.UpdateLVGConditionIfNeeded( ctx, lvg, @@ -183,31 +191,33 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile reason, ) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.cfg.VolumeGroupScanInterval.String())) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "retryIn", r.cfg.VolumeGroupScanInterval) return controller.Result{}, err } return controller.Result{}, nil } - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully validated BlockDevices of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully validated BlockDevices of the LVMVolumeGroup") - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to add label %s to the LVMVolumeGroup %s", internal.LVGMetadataNameLabelKey, r.cfg.NodeName)) + log.Debug("tries to add label to the LVMVolumeGroup", "labelKey", internal.LVGMetadataNameLabelKey) added, err = r.addLVGLabelIfNeeded(ctx, lvg, internal.LVGMetadataNameLabelKey, lvg.Name) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add label %s to the LVMVolumeGroup %s", internal.LVGMetadataNameLabelKey, lvg.Name)) + log.Error(err, "unable to add label to the LVMVolumeGroup", "labelKey", internal.LVGMetadataNameLabelKey) return controller.Result{}, err } if added { - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully added label %s to the LVMVolumeGroup %s", internal.LVGMetadataNameLabelKey, lvg.Name)) + log.Debug("successfully added label to the LVMVolumeGroup", "labelKey", internal.LVGMetadataNameLabelKey) } else { - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to add label %s to the LVMVolumeGroup %s", internal.LVGMetadataNameLabelKey, lvg.Name)) + log.Debug("no need to add label to the LVMVolumeGroup", "labelKey", internal.LVGMetadataNameLabelKey) } // We do this after BlockDevices validation and node belonging check to prevent multiple updates by all agents pods bds, _ := r.sdsCache.GetDevices() if len(bds) == 0 { - r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no block devices in the cache, add the LVMVolumeGroup %s to requeue", lvg.Name)) + log.Warning("no block devices in the cache, add the LVMVolumeGroup to requeue") err = 
r.lvgCl.UpdateLVGConditionIfNeeded( ctx, lvg, @@ -217,7 +227,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile "unable to apply configuration due to the cache's state", ) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s. Retry in %s", internal.TypeVGConfigurationApplied, lvg.Name, r.cfg.VolumeGroupScanInterval.String())) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "retryIn", r.cfg.VolumeGroupScanInterval) } return controller.Result{ @@ -225,25 +237,25 @@ func (r *Reconciler) Reconcile(ctx context.Context, request controller.Reconcile }, nil } - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] tries to sync status and spec thin-pool AllocationLimit fields for the LVMVolumeGroup %s", lvg.Name)) + log.Debug("tries to sync status and spec thin-pool AllocationLimit fields for the LVMVolumeGroup") err = r.syncThinPoolsAllocationLimit(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to sync status and spec thin-pool AllocationLimit fields for the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to sync status and spec thin-pool AllocationLimit fields for the LVMVolumeGroup") return controller.Result{}, err } shouldRequeue, err := r.runEventReconcile(ctx, lvg, blockDevices) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to reconcile the LVMVolumeGroup") } if shouldRequeue { - r.log.Warning(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] the LVMVolumeGroup %s event will be requeued in %s", lvg.Name, r.cfg.VolumeGroupScanInterval.String())) + log.Warning("the LVMVolumeGroup event will be requeued", "requeueIn", r.cfg.VolumeGroupScanInterval) return controller.Result{ RequeueAfter: r.cfg.VolumeGroupScanInterval, }, nil } - r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] Reconciler successfully reconciled the LVMVolumeGroup %s", lvg.Name)) + log.Info("Reconciler successfully reconciled the LVMVolumeGroup") return controller.Result{}, nil } @@ -253,34 +265,36 @@ func (r *Reconciler) runEventReconcile( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, error) { + log := r.log.WithName("runEventReconcile").WithValues("lvgName", lvg.Name) recType := r.identifyLVGReconcileFunc(lvg) switch recType { case internal.CreateReconcile: - r.log.Info(fmt.Sprintf("[runEventReconcile] CreateReconcile starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) + log.Info("CreateReconcile starts the reconciliation for the LVMVolumeGroup") return r.reconcileLVGCreateFunc(ctx, lvg, blockDevices) case internal.UpdateReconcile: - r.log.Info(fmt.Sprintf("[runEventReconcile] UpdateReconcile starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) + log.Info("UpdateReconcile starts the reconciliation for the LVMVolumeGroup") return r.reconcileLVGUpdateFunc(ctx, lvg, blockDevices) case internal.DeleteReconcile: - r.log.Info(fmt.Sprintf("[runEventReconcile] DeleteReconcile starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) + log.Info("DeleteReconcile starts the reconciliation for the LVMVolumeGroup") return r.reconcileLVGDeleteFunc(ctx, lvg) default: - r.log.Info(fmt.Sprintf("[runEventReconcile] no need to reconcile the LVMVolumeGroup %s", lvg.Name)) + 
log.Info("no need to reconcile the LVMVolumeGroup") } return false, nil } func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status false to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log := r.log.WithName("reconcileLVGDeleteFunc").WithValues("lvgName", lvg.Name) + log.Debug("starts to reconcile the LVMVolumeGroup") + log.Debug("tries to add the condition status false to the LVMVolumeGroup", "conditionType", internal.TypeVGConfigurationApplied) // this check prevents the LVMVolumeGroup resource's infinity updating after a retry for _, c := range lvg.Status.Conditions { if c.Type == internal.TypeVGConfigurationApplied && c.Reason != internal.ReasonTerminating { err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, "trying to delete VG") if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", "conditionType", internal.TypeVGConfigurationApplied) return true, err } break @@ -289,7 +303,8 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L _, exist := lvg.Annotations[internal.DeletionProtectionAnnotation] if exist { - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] the LVMVolumeGroup %s has a deletion timestamp but also has a deletion protection annotation %s. Remove it to proceed the delete operation", lvg.Name, internal.DeletionProtectionAnnotation)) + log.Debug("the LVMVolumeGroup has a deletion timestamp but also has a deletion protection annotation. Remove it to proceed the delete operation", + "annotation", internal.DeletionProtectionAnnotation) err := r.lvgCl.UpdateLVGConditionIfNeeded( ctx, lvg, @@ -299,35 +314,41 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L fmt.Sprintf("to delete the LVG remove the annotation %s", internal.DeletionProtectionAnnotation), ) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) return true, err } return false, nil } - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] check if VG %s of the LVMVolumeGroup %s uses LVs", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Debug("check if VG of the LVMVolumeGroup uses LVs", "vgName", lvg.Spec.ActualVGNameOnTheNode) usedLVs := r.getLVForVG(lvg.Spec.ActualVGNameOnTheNode) if len(usedLVs) > 0 { err := fmt.Errorf("VG %s uses LVs: %v. 
Delete used LVs first", lvg.Spec.ActualVGNameOnTheNode, usedLVs) - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to reconcile LVG %s", lvg.Name)) - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] tries to add the condition %s status False to the LVMVolumeGroup %s due to LV does exist", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to reconcile LVG") + log.Debug("tries to add the condition status False to the LVMVolumeGroup due to LV does exist", + "conditionType", internal.TypeVGConfigurationApplied) err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) return true, err } return true, nil } - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] VG %s of the LVMVolumeGroup %s does not use any LV. Start to delete the VG", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Debug("VG of the LVMVolumeGroup does not use any LV. Start to delete the VG", + "vgName", lvg.Spec.ActualVGNameOnTheNode) err := r.deleteVGIfExist(lvg.Spec.ActualVGNameOnTheNode) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete VG %s", lvg.Spec.ActualVGNameOnTheNode)) + log.Error(err, "unable to delete VG", + "vgName", lvg.Spec.ActualVGNameOnTheNode) err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) return true, err } @@ -336,27 +357,31 @@ func (r *Reconciler) reconcileLVGDeleteFunc(ctx context.Context, lvg *v1alpha1.L removed, err := r.removeLVGFinalizerIfExist(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to remove a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Error(err, "unable to remove a finalizer from the LVMVolumeGroup", + "finalizer", internal.SdsNodeConfiguratorFinalizer) err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonTerminating, err.Error()) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } if removed { - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] successfully removed a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("successfully removed a finalizer from the LVMVolumeGroup", + "finalizer", internal.SdsNodeConfiguratorFinalizer) } else { - r.log.Debug(fmt.Sprintf("[reconcileLVGDeleteFunc] no need to remove a finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("no need to remove a finalizer from 
the LVMVolumeGroup", + "finalizer", internal.SdsNodeConfiguratorFinalizer) } err = r.lvgCl.DeleteLVMVolumeGroup(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGDeleteFunc] unable to delete the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to delete the LVMVolumeGroup") return true, err } - r.log.Info(fmt.Sprintf("[reconcileLVGDeleteFunc] successfully reconciled VG %s of the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Info("successfully reconciled VG of the LVMVolumeGroup", "vgName", lvg.Spec.ActualVGNameOnTheNode) return false, nil } @@ -365,101 +390,110 @@ func (r *Reconciler) reconcileLVGUpdateFunc( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, error) { - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) + log := r.log.WithName("reconcileLVGUpdateFunc").WithValues("lvgName", lvg.Name) + log.Debug("starts to reconcile the LVMVolumeGroup") - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name)) + log.Debug("tries to validate the LVMVolumeGroup") pvs, _ := r.sdsCache.GetPVs() valid, reason := r.validateLVGForUpdateFunc(lvg, blockDevices) if !valid { - r.log.Warning(fmt.Sprintf("[reconcileLVGUpdateFunc] the LVMVolumeGroup %s is not valid", lvg.Name)) + log.Warning("the LVMVolumeGroup is not valid", "reason", reason) err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "reason", internal.ReasonValidationFailed) } return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully validated the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully validated the LVMVolumeGroup") - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to get VG %s for the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Debug("tries to get VG for the LVMVolumeGroup", "vgName", lvg.Spec.ActualVGNameOnTheNode) found, vg := r.tryGetVG(lvg.Spec.ActualVGNameOnTheNode) if !found { err := fmt.Errorf("VG %s not found", lvg.Spec.ActualVGNameOnTheNode) - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to reconcile the LVMVolumeGroup") err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGNotFound", err.Error()) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] VG %s found for the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log = log.WithValues("vgName", vg.VGName) + log.Debug("VG found for the LVMVolumeGroup") - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to check and update VG %s tag %s", lvg.Spec.ActualVGNameOnTheNode, internal.LVMTags[0])) + log.Debug("tries to check and update 
VG tag", "vgName", lvg.Spec.ActualVGNameOnTheNode, "tags", internal.LVMTags) updated, err := r.updateVGTagIfNeeded(ctx, lvg, vg) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to update VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log.Error(err, "unable to update VG tag of the LVMVolumeGroup") err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGUpdateFailed", fmt.Sprintf("unable to update VG tag, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } if updated { - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully updated VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log.Debug("successfully updated VG tag of the LVMVolumeGroup") } else { - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] no need to update VG %s tag of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log.Debug("no need to update VG tag of the LVMVolumeGroup") } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to resize PV of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("starts to resize PV of the LVMVolumeGroup") err = r.resizePVIfNeeded(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to resize PV of the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to resize PV of the LVMVolumeGroup") err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "PVResizeFailed", fmt.Sprintf("unable to resize PV, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully ended the resize operation for PV of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully ended the resize operation for PV of the LVMVolumeGroup") - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to extend VG %s of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log.Debug("starts to extend VG of the LVMVolumeGroup") err = r.extendVGIfNeeded(ctx, lvg, vg, pvs, blockDevices) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to extend VG of the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to extend VG of the LVMVolumeGroup") err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGExtendFailed", fmt.Sprintf("unable to extend VG, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully ended the extend operation for VG of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully ended the extend operation for VG of the LVMVolumeGroup") if 
lvg.Spec.ThinPools != nil { - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] starts to reconcile thin-pools of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("starts to reconcile thin-pools of the LVMVolumeGroup") lvs, _ := r.sdsCache.GetLVs() err = r.reconcileThinPoolsIfNeeded(ctx, lvg, vg, lvs) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to reconcile thin-pools of the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to reconcile thin-pools of the LVMVolumeGroup") err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolReconcileFailed", fmt.Sprintf("unable to reconcile thin-pools, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", "conditionType", internal.TypeVGConfigurationApplied) } return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully reconciled thin-pools operation of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully reconciled thin-pools operation of the LVMVolumeGroup") } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] tries to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Debug("tries to add a condition to the LVMVolumeGroup", "conditionType", internal.TypeVGConfigurationApplied) err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, internal.ReasonApplied, "configuration has been applied") if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGUpdateFunc] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully added a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) - r.log.Info(fmt.Sprintf("[reconcileLVGUpdateFunc] successfully reconciled the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully added a condition to the LVMVolumeGroup", "conditionType", internal.TypeVGConfigurationApplied) + log.Info("successfully reconciled the LVMVolumeGroup") return false, nil } @@ -469,7 +503,8 @@ func (r *Reconciler) reconcileLVGCreateFunc( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, error) { - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) + log := r.log.WithName("reconcileLVGCreateFunc").WithValues("lvgName", lvg.Name) + log.Debug("starts to reconcile the LVMVolumeGroup") // this check prevents the LVMVolumeGroup resource's infinity updating after a retry exist := false @@ -481,74 +516,81 @@ func (r *Reconciler) reconcileLVGCreateFunc( } if !exist { - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Debug("tries to add the condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonCreating, "trying to apply the configuration") if err != nil { - r.log.Error(err, 
fmt.Sprintf("[reconcileLVGCreateFunc] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) return true, err } } - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to validate the LVMVolumeGroup %s", lvg.Name)) + log.Debug("tries to validate the LVMVolumeGroup") valid, reason := r.validateLVGForCreateFunc(lvg, blockDevices) if !valid { - r.log.Warning(fmt.Sprintf("[reconcileLVGCreateFunc] validation fails for the LVMVolumeGroup %s", lvg.Name)) + log.Warning("validation fails for the LVMVolumeGroup") err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonValidationFailed, reason) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] successfully validated the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully validated the LVMVolumeGroup") - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] tries to create VG for the LVMVolumeGroup %s", lvg.Name)) + log.Debug("tries to create VG for the LVMVolumeGroup") err := r.createVGComplex(lvg, blockDevices) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to create VG for the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to create VG for the LVMVolumeGroup") err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "VGCreationFailed", fmt.Sprintf("unable to create VG, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } - r.log.Info(fmt.Sprintf("[reconcileLVGCreateFunc] successfully created VG for the LVMVolumeGroup %s", lvg.Name)) + log.Info("successfully created VG for the LVMVolumeGroup") if lvg.Spec.ThinPools != nil { - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] the LVMVolumeGroup %s has thin-pools. Tries to create them", lvg.Name)) + log.Debug("the LVMVolumeGroup has thin-pools. 
Tries to create them") for _, tp := range lvg.Spec.ThinPools { + log := log.WithValues("thinPoolName", tp.Name) vgSize := countVGSizeByBlockDevices(blockDevices) tpRequestedSize, err := utils.GetRequestedSizeFromString(tp.Size, vgSize) if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to get thin-pool %s requested size of the LVMVolumeGroup %s", tp.Name, lvg.Name)) + log.Error(err, "unable to get thin-pool requested size of the LVMVolumeGroup") return false, err } var cmd string if utils.AreSizesEqualWithinDelta(tpRequestedSize, vgSize, internal.ResizeDelta) { - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] Thin-pool %s of the LVMVolumeGroup %s will be created with full VG space size", tp.Name, lvg.Name)) + log.Debug("Thin-pool of the LVMVolumeGroup will be created with full VG space size") cmd, err = r.commands.CreateThinPoolFullVGSpace(tp.Name, lvg.Spec.ActualVGNameOnTheNode) } else { - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] Thin-pool %s of the LVMVolumeGroup %s will be created with size %s", tp.Name, lvg.Name, tpRequestedSize.String())) + log.Debug("Thin-pool of the LVMVolumeGroup will be created with size", "size", tpRequestedSize) cmd, err = r.commands.CreateThinPool(tp.Name, lvg.Spec.ActualVGNameOnTheNode, tpRequestedSize.Value()) } if err != nil { - r.log.Error(err, fmt.Sprintf("[reconcileLVGCreateFunc] unable to create thin-pool %s of the LVMVolumeGroup %s, cmd: %s", tp.Name, lvg.Name, cmd)) + log.Error(err, "unable to create thin-pool of the LVMVolumeGroup", "cmd", cmd) err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, "ThinPoolCreationFailed", fmt.Sprintf("unable to create thin-pool, err: %s", err.Error())) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) } return true, err } } - r.log.Debug(fmt.Sprintf("[reconcileLVGCreateFunc] successfully created thin-pools for the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully created thin-pools for the LVMVolumeGroup") } err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionTrue, internal.TypeVGConfigurationApplied, internal.ReasonApplied, "all configuration has been applied") if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to add a condition %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied) return true, err } @@ -556,19 +598,22 @@ func (r *Reconciler) reconcileLVGCreateFunc( } func (r *Reconciler) shouldUpdateLVGLabels(lvg *v1alpha1.LVMVolumeGroup, labelKey, labelValue string) bool { + log := r.log.WithName("shouldUpdateLVGLabels").WithValues("lvgName", lvg.Name, "labelKey", labelKey) if lvg.Labels == nil { - r.log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has no labels.", lvg.Name)) + log.Debug("the LVMVolumeGroup has no labels") return true } val, exist := lvg.Labels[labelKey] if !exist { - r.log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the LVMVolumeGroup %s has no label %s.", lvg.Name, labelKey)) + log.Debug("the LVMVolumeGroup has no label") return true } if val != labelValue { - r.log.Debug(fmt.Sprintf("[shouldUpdateLVGLabels] the 
LVMVolumeGroup %s has label %s but the value is incorrect - %s (should be %s)", lvg.Name, labelKey, val, labelValue)) + log.Debug("the LVMVolumeGroup has label but the value is incorrect", + "currentValue", val, + "expectedValue", labelValue) return true } @@ -576,39 +621,40 @@ func (r *Reconciler) shouldUpdateLVGLabels(lvg *v1alpha1.LVMVolumeGroup, labelKe } func (r *Reconciler) shouldLVGWatcherReconcileUpdateEvent(oldLVG, newLVG *v1alpha1.LVMVolumeGroup) bool { + log := r.log.WithName("shouldLVGWatcherReconcileUpdateEvent").WithValues("lvgName", newLVG.Name) if newLVG.DeletionTimestamp != nil { - r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s has deletionTimestamp", newLVG.Name)) + log.Debug("update event should be reconciled as the LVMVolumeGroup has deletionTimestamp") return true } for _, c := range newLVG.Status.Conditions { if c.Type == internal.TypeVGConfigurationApplied { if c.Reason == internal.ReasonUpdating || c.Reason == internal.ReasonCreating { - r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should not be reconciled as the LVMVolumeGroup %s reconciliation still in progress", newLVG.Name)) + log.Debug("update event should not be reconciled as the LVMVolumeGroup reconciliation still in progress") return false } } } if _, exist := newLVG.Labels[internal.LVGUpdateTriggerLabel]; exist { - r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s has the label %s", newLVG.Name, internal.LVGUpdateTriggerLabel)) + log.Debug("update event should be reconciled as the LVMVolumeGroup has the label", "label", internal.LVGUpdateTriggerLabel) return true } if r.shouldUpdateLVGLabels(newLVG, internal.LVGMetadataNameLabelKey, newLVG.Name) { - r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup's %s labels have been changed", newLVG.Name)) + log.Debug("update event should be reconciled as the LVMVolumeGroup's labels have been changed") return true } if !reflect.DeepEqual(oldLVG.Spec, newLVG.Spec) { - r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s configuration has been changed", newLVG.Name)) + log.Debug("update event should be reconciled as the LVMVolumeGroup configuration has been changed") return true } for _, n := range newLVG.Status.Nodes { for _, d := range n.Devices { if !utils.AreSizesEqualWithinDelta(d.PVSize, d.DevSize, internal.ResizeDelta) { - r.log.Debug(fmt.Sprintf("[shouldLVGWatcherReconcileUpdateEvent] update event should be reconciled as the LVMVolumeGroup %s PV size is different to device size", newLVG.Name)) + log.Debug("update event should be reconciled as the LVMVolumeGroup PV size is different to device size") return true } } @@ -632,6 +678,7 @@ func (r *Reconciler) addLVGFinalizerIfNotExist(ctx context.Context, lvg *v1alpha } func (r *Reconciler) syncThinPoolsAllocationLimit(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup) error { + log := r.log.WithName("syncThinPoolsAllocationLimit").WithValues("lvgName", lvg.Name) updated := false tpSpecLimits := make(map[string]string, len(lvg.Spec.ThinPools)) @@ -644,66 +691,75 @@ func (r *Reconciler) syncThinPoolsAllocationLimit(ctx context.Context, lvg *v1al err error ) for i := range lvg.Status.ThinPools { + log := log.WithValues("thinPoolName", lvg.Status.ThinPools[i].Name) if specLimits, matched := 
tpSpecLimits[lvg.Status.ThinPools[i].Name]; matched { if lvg.Status.ThinPools[i].AllocationLimit != specLimits { - r.log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] thin-pool %s status AllocationLimit: %s of the LVMVolumeGroup %s should be updated by spec one: %s", lvg.Status.ThinPools[i].Name, lvg.Status.ThinPools[i].AllocationLimit, lvg.Name, specLimits)) + log.Debug("thin-pool status AllocationLimit should be updated by spec one", + "currentLimit", lvg.Status.ThinPools[i].AllocationLimit, + "specLimit", specLimits) updated = true lvg.Status.ThinPools[i].AllocationLimit = specLimits space, err = utils.GetThinPoolAvailableSpace(lvg.Status.ThinPools[i].ActualSize, lvg.Status.ThinPools[i].AllocatedSize, specLimits) if err != nil { - r.log.Error(err, fmt.Sprintf("[syncThinPoolsAllocationLimit] unable to get thin pool %s available space", lvg.Status.ThinPools[i].Name)) + log.Error(err, "unable to get thin pool available space") return err } - r.log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] successfully got a new available space %s of the thin-pool %s", space.String(), lvg.Status.ThinPools[i].Name)) + log.Debug("successfully got a new available space of the thin-pool", + "availableSpace", space) lvg.Status.ThinPools[i].AvailableSpace = space } } else { - r.log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] status thin-pool %s of the LVMVolumeGroup %s was not found as used in spec", lvg.Status.ThinPools[i].Name, lvg.Name)) + log.Debug("status thin-pool of the LVMVolumeGroup was not found as used in spec") } } if updated { fmt.Printf("%+v", lvg.Status.ThinPools) - r.log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] tries to update the LVMVolumeGroup %s", lvg.Name)) + log.Debug("tries to update the LVMVolumeGroup") err = r.cl.Status().Update(ctx, lvg) if err != nil { return err } - r.log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] successfully updated the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully updated the LVMVolumeGroup") } else { - r.log.Debug(fmt.Sprintf("[syncThinPoolsAllocationLimit] every status thin-pool AllocationLimit value is synced with spec one for the LVMVolumeGroup %s", lvg.Name)) + log.Debug("every status thin-pool AllocationLimit value is synced with spec one for the LVMVolumeGroup") } return nil } func (r *Reconciler) deleteLVGIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { + log := r.log.WithName("deleteLVGIfNeeded").WithValues("lvgName", lvg.Name) if lvg.DeletionTimestamp == nil { return false, nil } vgs, _ := r.sdsCache.GetVGs() if !checkIfVGExist(lvg.Spec.ActualVGNameOnTheNode, vgs) { - r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] VG %s was not yet created for the LVMVolumeGroup %s and the resource is marked as deleting. Delete the resource", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Info("VG was not yet created for the LVMVolumeGroup and the resource is marked as deleting. 
Delete the resource", + "vgName", lvg.Spec.ActualVGNameOnTheNode) removed, err := r.removeLVGFinalizerIfExist(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Error(err, "unable to remove the finalizer from the LVMVolumeGroup", + "finalizer", internal.SdsNodeConfiguratorFinalizer) return false, err } if removed { - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully removed the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("successfully removed the finalizer from the LVMVolumeGroup", + "finalizer", internal.SdsNodeConfiguratorFinalizer) } else { - r.log.Debug(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] no need to remove the finalizer %s from the LVMVolumeGroup %s", internal.SdsNodeConfiguratorFinalizer, lvg.Name)) + log.Debug("no need to remove the finalizer from the LVMVolumeGroup", + "finalizer", internal.SdsNodeConfiguratorFinalizer) } err = r.lvgCl.DeleteLVMVolumeGroup(ctx, lvg) if err != nil { - r.log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupWatcherController] unable to delete the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to delete the LVMVolumeGroup") return false, err } - r.log.Info(fmt.Sprintf("[RunLVMVolumeGroupWatcherController] successfully deleted the LVMVolumeGroup %s", lvg.Name)) + log.Info("successfully deleted the LVMVolumeGroup") return true, nil } return false, nil @@ -713,27 +769,29 @@ func (r *Reconciler) validateLVGForCreateFunc( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, string) { + log := r.log.WithName("validateLVGForCreateFunc").WithValues("lvgName", lvg.Name) reason := strings.Builder{} - r.log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] check if every selected BlockDevice of the LVMVolumeGroup %s is consumable", lvg.Name)) + log.Debug("check if every selected BlockDevice of the LVMVolumeGroup is consumable") // totalVGSize needs to count if there is enough space for requested thin-pools totalVGSize := countVGSizeByBlockDevices(blockDevices) for _, bd := range blockDevices { if !bd.Status.Consumable { - r.log.Warning(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice %s is not consumable", bd.Name)) - r.log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] BlockDevice name: %s, status: %+v", bd.Name, bd.Status)) + log.Warning("BlockDevice is not consumable", "blockDeviceName", bd.Name) + log.Trace("BlockDevice", "name", bd.Name, "status", bd.Status) reason.WriteString(fmt.Sprintf("BlockDevice %s is not consumable. ", bd.Name)) } } if reason.Len() == 0 { - r.log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] all BlockDevices of the LVMVolumeGroup %s are consumable", lvg.Name)) + log.Debug("all BlockDevices of the LVMVolumeGroup are consumable") } if lvg.Spec.ThinPools != nil { - r.log.Debug(fmt.Sprintf("[validateLVGForCreateFunc] the LVMVolumeGroup %s has thin-pools. Validate if VG size has enough space for the thin-pools", lvg.Name)) - r.log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] the LVMVolumeGroup %s has thin-pools %v", lvg.Name, lvg.Spec.ThinPools)) - r.log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] total LVMVolumeGroup %s size: %s", lvg.Name, totalVGSize.String())) + log.Debug("the LVMVolumeGroup has thin-pools. 
Validate if VG size has enough space for the thin-pools") + log.Trace("the LVMVolumeGroup has thin-pools and total size", + "thinPools", lvg.Spec.ThinPools, + "totalSize", totalVGSize) var totalThinPoolSize int64 for _, tp := range lvg.Spec.ThinPools { @@ -757,11 +815,13 @@ func (r *Reconciler) validateLVGForCreateFunc( totalThinPoolSize += tpRequestedSize.Value() } - r.log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] LVMVolumeGroup %s thin-pools requested space: %d", lvg.Name, totalThinPoolSize)) + log.Trace("LVMVolumeGroup thin-pools requested space", "requestedSpace", totalThinPoolSize) if totalThinPoolSize != totalVGSize.Value() && totalThinPoolSize+internal.ResizeDelta.Value() >= totalVGSize.Value() { - r.log.Trace(fmt.Sprintf("[validateLVGForCreateFunc] total thin pool size: %s, total vg size: %s", resource.NewQuantity(totalThinPoolSize, resource.BinarySI).String(), totalVGSize.String())) - r.log.Warning(fmt.Sprintf("[validateLVGForCreateFunc] requested thin pool size is more than VG total size for the LVMVolumeGroup %s", lvg.Name)) + log.Trace("total thin pool size and total vg size", + "totalThinPoolSize", resource.NewQuantity(totalThinPoolSize, resource.BinarySI), + "totalVGSize", totalVGSize) + log.Warning("requested thin pool size is more than VG total size for the LVMVolumeGroup") reason.WriteString(fmt.Sprintf("Required space for thin-pools %d is more than VG size %d.", totalThinPoolSize, totalVGSize.Value())) } } @@ -777,9 +837,10 @@ func (r *Reconciler) validateLVGForUpdateFunc( lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice, ) (bool, string) { + log := r.log.WithName("validateLVGForUpdateFunc").WithValues("lvgName", lvg.Name) reason := strings.Builder{} pvs, _ := r.sdsCache.GetPVs() - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] check if every new BlockDevice of the LVMVolumeGroup %s is comsumable", lvg.Name)) + log.Debug("check if every new BlockDevice of the LVMVolumeGroup is consumable") actualPVPaths := make(map[string]struct{}, len(pvs)) for _, pv := range pvs { actualPVPaths[pv.PVName] = struct{}{} @@ -791,24 +852,25 @@ func (r *Reconciler) validateLVGForUpdateFunc( // additionBlockDeviceSpace value is needed to count if VG will have enough space for thin-pools var additionBlockDeviceSpace int64 for _, bd := range blockDevices { + log := log.WithValues("blockDeviceName", bd.Name, "pvPath", bd.Status.Path) if _, found := actualPVPaths[bd.Status.Path]; !found { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] unable to find the PV %s for BlockDevice %s. Check if the BlockDevice is already used", bd.Status.Path, bd.Name)) + log.Debug("unable to find the PV for BlockDevice. Check if the BlockDevice is already used") for _, n := range lvg.Status.Nodes { for _, d := range n.Devices { if d.BlockDevice == bd.Name { - r.log.Warning(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s misses the PV %s. That might be because the corresponding device was removed from the node. Unable to validate BlockDevices", bd.Name, bd.Status.Path)) + log.Warning("BlockDevice misses the PV. That might be because the corresponding device was removed from the node. Unable to validate BlockDevices") reason.WriteString(fmt.Sprintf("BlockDevice %s misses the PV %s (that might be because the device was removed from the node). 
", bd.Name, bd.Status.Path)) } if reason.Len() == 0 { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s does not miss a PV", d.BlockDevice)) + log.Debug("BlockDevice does not miss a PV") } } } - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] PV %s for BlockDevice %s of the LVMVolumeGroup %s is not created yet, check if the BlockDevice is consumable", bd.Status.Path, bd.Name, lvg.Name)) + log.Debug("PV for BlockDevice of the LVMVolumeGroup is not created yet, check if the BlockDevice is consumable") if reason.Len() > 0 { - r.log.Debug("[validateLVGForUpdateFunc] some BlockDevices misses its PVs, unable to check if they are consumable") + log.Debug("some BlockDevices misses its PVs, unable to check if they are consumable") continue } @@ -817,13 +879,13 @@ func (r *Reconciler) validateLVGForUpdateFunc( continue } - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] BlockDevice %s is consumable", bd.Name)) + log.Debug("BlockDevice is consumable") additionBlockDeviceSpace += bd.Status.Size.Value() } } if lvg.Spec.ThinPools != nil { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s has thin-pools. Validate them", lvg.Name)) + log.Debug("the LVMVolumeGroup has thin-pools. Validate them") actualThinPools := make(map[string]internal.LVData, len(lvg.Spec.ThinPools)) for _, tp := range lvg.Spec.ThinPools { lv := r.sdsCache.FindLV(lvg.Spec.ActualVGNameOnTheNode, tp.Name) @@ -851,6 +913,7 @@ func (r *Reconciler) validateLVGForUpdateFunc( newTotalVGSize := resource.NewQuantity(vg.VGSize.Value()+additionBlockDeviceSpace, resource.BinarySI) for _, specTp := range lvg.Spec.ThinPools { + log := log.WithValues("thinPoolName", specTp.Name) // might be a case when Thin-pool is already created, but is not shown in status tpRequestedSize, err := utils.GetRequestedSizeFromString(specTp.Size, *newTotalVGSize) if err != nil { @@ -863,7 +926,9 @@ func (r *Reconciler) validateLVGForUpdateFunc( continue } - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s thin-pool %s requested size %s, Status VG size %s", lvg.Name, specTp.Name, tpRequestedSize.String(), lvg.Status.VGSize.String())) + log.Debug("the LVMVolumeGroup thin-pool requested size and Status VG size", + "requestedSize", tpRequestedSize, + "statusVGSize", lvg.Status.VGSize) switch utils.AreSizesEqualWithinDelta(tpRequestedSize, *newTotalVGSize, internal.ResizeDelta) { // means a user wants 100% of VG space case true: @@ -874,19 +939,23 @@ func (r *Reconciler) validateLVGForUpdateFunc( } case false: if actualThinPool, created := actualThinPools[specTp.Name]; !created { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] thin-pool %s of the LVMVolumeGroup %s is not yet created, adds its requested size", specTp.Name, lvg.Name)) + log.Debug("thin-pool of the LVMVolumeGroup is not yet created, adds its requested size") addingThinPoolSize += tpRequestedSize.Value() } else { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] thin-pool %s of the LVMVolumeGroup %s is already created, check its requested size", specTp.Name, lvg.Name)) + log.Debug("thin-pool of the LVMVolumeGroup is already created, check its requested size") if tpRequestedSize.Value()+internal.ResizeDelta.Value() < actualThinPool.LVSize.Value() { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s Spec.ThinPool %s size %s is less than Status one: %s", lvg.Name, specTp.Name, tpRequestedSize.String(), actualThinPool.LVSize.String())) + log.Debug("the LVMVolumeGroup Spec.ThinPool size is less than Status 
one", + "requestedSize", tpRequestedSize, + "actualSize", actualThinPool.LVSize) reason.WriteString(fmt.Sprintf("Requested Spec.ThinPool %s size %s is less than actual one %s. ", specTp.Name, tpRequestedSize.String(), actualThinPool.LVSize.String())) continue } thinPoolSizeDiff := tpRequestedSize.Value() - actualThinPool.LVSize.Value() if thinPoolSizeDiff > internal.ResizeDelta.Value() { - r.log.Debug(fmt.Sprintf("[validateLVGForUpdateFunc] the LVMVolumeGroup %s Spec.ThinPool %s size %s more than Status one: %s", lvg.Name, specTp.Name, tpRequestedSize.String(), actualThinPool.LVSize.String())) + log.Debug("the LVMVolumeGroup Spec.ThinPool size more than Status one", + "requestedSize", tpRequestedSize, + "actualSize", actualThinPool.LVSize) addingThinPoolSize += thinPoolSizeDiff } } @@ -896,7 +965,10 @@ func (r *Reconciler) validateLVGForUpdateFunc( if !hasFullThinPool { allocatedSize := getVGAllocatedSize(*vg) totalFreeSpace := newTotalVGSize.Value() - allocatedSize.Value() - r.log.Trace(fmt.Sprintf("[validateLVGForUpdateFunc] new LVMVolumeGroup %s thin-pools requested %d size, additional BlockDevices space %d, total: %d", lvg.Name, addingThinPoolSize, additionBlockDeviceSpace, totalFreeSpace)) + log.Trace("new LVMVolumeGroup thin-pools requested size, additional BlockDevices space, total", + "addingThinPoolSize", addingThinPoolSize, + "additionBlockDeviceSpace", additionBlockDeviceSpace, + "totalFreeSpace", totalFreeSpace) if addingThinPoolSize != 0 && addingThinPoolSize+internal.ResizeDelta.Value() > totalFreeSpace { reason.WriteString("Added thin-pools requested sizes are more than allowed free space in VG.") } @@ -954,6 +1026,7 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( vg internal.VGData, lvs []internal.LVData, ) error { + log := r.log.WithName("reconcileThinPoolsIfNeeded").WithValues("lvgName", lvg.Name) actualThinPools := make(map[string]internal.LVData, len(lvs)) for _, lv := range lvs { if string(lv.LVAttr[0]) == "t" { @@ -963,18 +1036,21 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( errs := strings.Builder{} for _, specTp := range lvg.Spec.ThinPools { + log := log.WithValues("thinPoolName", specTp.Name) tpRequestedSize, err := utils.GetRequestedSizeFromString(specTp.Size, lvg.Status.VGSize) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to get requested thin-pool %s size of the LVMVolumeGroup %s", specTp.Name, lvg.Name)) + log.Error(err, "unable to get requested thin-pool size of the LVMVolumeGroup") return err } if actualTp, exist := actualThinPools[specTp.Name]; !exist { - r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s is not created yet. Create it", specTp.Name, lvg.Name)) + log.Debug("thin-pool of the LVMVolumeGroup is not created yet. 
Create it") if isApplied(lvg) { err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) + log.Error(err, "unable to add the condition status False reason to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "reason", internal.ReasonUpdating) return err } } @@ -982,40 +1058,46 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( var cmd string start := time.Now() if utils.AreSizesEqualWithinDelta(tpRequestedSize, lvg.Status.VGSize, internal.ResizeDelta) { - r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s will be created with size 100FREE", specTp.Name, lvg.Name)) + log.Debug("thin-pool of the LVMVolumeGroup will be created with size 100FREE") cmd, err = r.commands.CreateThinPoolFullVGSpace(specTp.Name, vg.VGName) } else { - r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s will be created with size %s", specTp.Name, lvg.Name, tpRequestedSize.String())) + log.Debug("thin-pool of the LVMVolumeGroup will be created with size", + "size", tpRequestedSize) cmd, err = r.commands.CreateThinPool(specTp.Name, vg.VGName, tpRequestedSize.Value()) } r.metrics.UtilsCommandsDuration(ReconcilerName, "lvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "lvcreate").Inc() if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "lvcreate").Inc() - r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to create thin-pool %s of the LVMVolumeGroup %s, cmd: %s", specTp.Name, lvg.Name, cmd)) + log.Error(err, "unable to create thin-pool of the LVMVolumeGroup", + "cmd", cmd) errs.WriteString(fmt.Sprintf("unable to create thin-pool %s, err: %s. ", specTp.Name, err.Error())) continue } - r.log.Info(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] thin-pool %s of the LVMVolumeGroup %s has been successfully created", specTp.Name, lvg.Name)) + log.Info("thin-pool of the LVMVolumeGroup has been successfully created") } else { // thin-pool exists if utils.AreSizesEqualWithinDelta(tpRequestedSize, actualTp.LVSize, internal.ResizeDelta) { - r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] the LVMVolumeGroup %s requested thin pool %s size is equal to actual one", lvg.Name, tpRequestedSize.String())) + log.Debug("the LVMVolumeGroup requested thin pool size is equal to actual one", + "requestedSize", tpRequestedSize) continue } - r.log.Debug(fmt.Sprintf("[ReconcileThinPoolsIfNeeded] the LVMVolumeGroup %s requested thin pool %s size is more than actual one. Resize it", lvg.Name, tpRequestedSize.String())) + log.Debug("the LVMVolumeGroup requested thin pool size is more than actual one. 
Resize it", + "requestedSize", tpRequestedSize.String()) if isApplied(lvg) { err = r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) + log.Error(err, "unable to add the condition status False reason to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "reason", internal.ReasonUpdating) return err } } err = r.extendThinPool(lvg, specTp) if err != nil { - r.log.Error(err, fmt.Sprintf("[ReconcileThinPoolsIfNeeded] unable to resize thin-pool %s of the LVMVolumeGroup %s", specTp.Name, lvg.Name)) + log.Error(err, "unable to resize thin-pool of the LVMVolumeGroup") errs.WriteString(fmt.Sprintf("unable to resize thin-pool %s, err: %s. ", specTp.Name, err.Error())) continue } @@ -1030,24 +1112,28 @@ func (r *Reconciler) reconcileThinPoolsIfNeeded( } func (r *Reconciler) resizePVIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVolumeGroup) error { + log := r.log.WithName("resizePVIfNeeded").WithValues("lvgName", lvg.Name) if len(lvg.Status.Nodes) == 0 { - r.log.Warning(fmt.Sprintf("[ResizePVIfNeeded] the LVMVolumeGroup %s nodes are empty. Wait for the next update", lvg.Name)) + log.Warning("the LVMVolumeGroup nodes are empty. Wait for the next update") return nil } errs := strings.Builder{} for _, n := range lvg.Status.Nodes { for _, d := range n.Devices { + log := log.WithValues("blockDeviceName", d.BlockDevice, "pvPath", d.Path) if d.DevSize.Value()-d.PVSize.Value() > internal.ResizeDelta.Value() { if isApplied(lvg) { err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { - r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) + log.Error(err, "unable to add the condition status False reason to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "reason", internal.ReasonUpdating) return err } } - r.log.Debug(fmt.Sprintf("[ResizePVIfNeeded] the LVMVolumeGroup %s BlockDevice %s PVSize is less than actual device size. Resize PV", lvg.Name, d.BlockDevice)) + log.Debug("the LVMVolumeGroup BlockDevice PVSize is less than actual device size. Resize PV") start := time.Now() cmd, err := r.commands.ResizePV(d.Path) @@ -1055,14 +1141,15 @@ func (r *Reconciler) resizePVIfNeeded(ctx context.Context, lvg *v1alpha1.LVMVolu r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvresize") if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvresize").Inc() - r.log.Error(err, fmt.Sprintf("[ResizePVIfNeeded] unable to resize PV %s of BlockDevice %s of LVMVolumeGroup %s, cmd: %s", d.Path, d.BlockDevice, lvg.Name, cmd)) + log.Error(err, "unable to resize PV of BlockDevice of LVMVolumeGroup", + "cmd", cmd) errs.WriteString(fmt.Sprintf("unable to resize PV %s, err: %s. 
", d.Path, err.Error())) continue } - r.log.Info(fmt.Sprintf("[ResizePVIfNeeded] successfully resized PV %s of BlockDevice %s of LVMVolumeGroup %s", d.Path, d.BlockDevice, lvg.Name)) + log.Info("successfully resized PV of BlockDevice of LVMVolumeGroup") } else { - r.log.Debug(fmt.Sprintf("[ResizePVIfNeeded] no need to resize PV %s of BlockDevice %s of the LVMVolumeGroup %s", d.Path, d.BlockDevice, lvg.Name)) + log.Debug("no need to resize PV of BlockDevice of the LVMVolumeGroup") } } } @@ -1081,9 +1168,11 @@ func (r *Reconciler) extendVGIfNeeded( pvs []internal.PVData, blockDevices map[string]v1alpha1.BlockDevice, ) error { + log := r.log.WithName("extendVGIfNeeded").WithValues("lvgName", lvg.Name, "vgName", vg.VGName) for _, n := range lvg.Status.Nodes { for _, d := range n.Devices { - r.log.Trace(fmt.Sprintf("[ExtendVGIfNeeded] the LVMVolumeGroup %s status block device: %s", lvg.Name, d.BlockDevice)) + log := log.WithValues("blockDeviceName", d.BlockDevice) + log.Trace("the LVMVolumeGroup status block device") } } @@ -1095,32 +1184,34 @@ func (r *Reconciler) extendVGIfNeeded( devicesToExtend := make([]string, 0, len(blockDevices)) for _, bd := range blockDevices { if _, exist := pvsMap[bd.Status.Path]; !exist { - r.log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] the BlockDevice %s of LVMVolumeGroup %s Spec is not counted as used", bd.Name, lvg.Name)) + log.Debug("the BlockDevice of LVMVolumeGroup Spec is not counted as used", "blockDeviceName", bd.Name) devicesToExtend = append(devicesToExtend, bd.Name) } } if len(devicesToExtend) == 0 { - r.log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] VG %s of the LVMVolumeGroup %s should not be extended", vg.VGName, lvg.Name)) + log.Debug("VG of the LVMVolumeGroup should not be extended") return nil } if isApplied(lvg) { err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { - r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) + log.Error(err, "unable to add the condition status False reason to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "reason", internal.ReasonUpdating) return err } } - r.log.Debug(fmt.Sprintf("[ExtendVGIfNeeded] VG %s should be extended as there are some BlockDevices were added to Spec field of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log.Debug("VG should be extended as there are some BlockDevices were added to Spec field of the LVMVolumeGroup") paths := extractPathsFromBlockDevices(devicesToExtend, blockDevices) err := r.extendVGComplex(paths, vg.VGName) if err != nil { - r.log.Error(err, fmt.Sprintf("[ExtendVGIfNeeded] unable to extend VG %s of the LVMVolumeGroup %s", vg.VGName, lvg.Name)) + log.Error(err, "unable to extend VG of the LVMVolumeGroup") return err } - r.log.Info(fmt.Sprintf("[ExtendVGIfNeeded] VG %s of the LVMVolumeGroup %s was extended", vg.VGName, lvg.Name)) + log.Info("VG of the LVMVolumeGroup was extended") return nil } @@ -1169,16 +1260,17 @@ func (r *Reconciler) getLVForVG(vgName string) []string { } func (r *Reconciler) deleteVGIfExist(vgName string) error { + log := r.log.WithName("deleteVGIfExist").WithValues("vgName", vgName) vgs, _ := r.sdsCache.GetVGs() if !checkIfVGExist(vgName, vgs) { - r.log.Debug(fmt.Sprintf("[DeleteVGIfExist] no VG %s found, nothing to delete", vgName)) + log.Debug("no 
VG found, nothing to delete") return nil } pvs, _ := r.sdsCache.GetPVs() if len(pvs) == 0 { err := errors.New("no any PV found") - r.log.Error(err, fmt.Sprintf("[DeleteVGIfExist] no any PV was found while deleting VG %s", vgName)) + log.Error(err, "no any PV was found while deleting VG") return err } @@ -1186,13 +1278,12 @@ func (r *Reconciler) deleteVGIfExist(vgName string) error { command, err := r.commands.RemoveVG(vgName) r.metrics.UtilsCommandsDuration(ReconcilerName, "vgremove").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgremove").Inc() - r.log.Debug(command) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgremove").Inc() - r.log.Error(err, "RemoveVG "+command) + log.Error(err, "RemoveVG", "command", command) return err } - r.log.Debug(fmt.Sprintf("[DeleteVGIfExist] VG %s was successfully deleted from the node", vgName)) + log.Debug("VG was successfully deleted from the node", "command", command) var pvsToRemove []string for _, pv := range pvs { if pv.VGName == vgName { @@ -1204,27 +1295,27 @@ func (r *Reconciler) deleteVGIfExist(vgName string) error { command, err = r.commands.RemovePV(pvsToRemove) r.metrics.UtilsCommandsDuration(ReconcilerName, "pvremove").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvremove").Inc() - r.log.Debug(command) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvremove").Inc() - r.log.Error(err, "RemovePV "+command) + log.Error(err, "RemovePV", "command", command) return err } - r.log.Debug(fmt.Sprintf("[DeleteVGIfExist] successfully delete PVs of VG %s from the node", vgName)) + log.Debug("successfully delete PVs of VG from the node", "command", command) return nil } func (r *Reconciler) extendVGComplex(extendPVs []string, vgName string) error { + log := r.log.WithName("extendVGComplex").WithValues("vgName", vgName) for _, pvPath := range extendPVs { start := time.Now() command, err := r.commands.CreatePV(pvPath) r.metrics.UtilsCommandsDuration(ReconcilerName, "pvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvcreate").Inc() - r.log.Debug(command) + log.Debug("CreatePV command", "command", command) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvcreate").Inc() - r.log.Error(err, "CreatePV ") + log.Error(err, "CreatePV") return err } } @@ -1233,44 +1324,45 @@ func (r *Reconciler) extendVGComplex(extendPVs []string, vgName string) error { command, err := r.commands.ExtendVG(vgName, extendPVs) r.metrics.UtilsCommandsDuration(ReconcilerName, "vgextend").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgextend").Inc() - r.log.Debug(command) + log.Debug("ExtendVG command", "command", command) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgextend").Inc() - r.log.Error(err, "ExtendVG ") + log.Error(err, "ExtendVG") return err } return nil } func (r *Reconciler) createVGComplex(lvg *v1alpha1.LVMVolumeGroup, blockDevices map[string]v1alpha1.BlockDevice) error { + log := r.log.WithName("createVGComplex").WithValues("lvgName", lvg.Name, "vgName", lvg.Spec.ActualVGNameOnTheNode) paths := extractPathsFromBlockDevices(nil, blockDevices) - r.log.Trace(fmt.Sprintf("[CreateVGComplex] LVMVolumeGroup %s devices paths %v", lvg.Name, paths)) + log.Trace("LVMVolumeGroup devices paths", "paths", paths) for _, path := range paths { + log := 
log.WithValues("path", path) start := time.Now() command, err := r.commands.CreatePV(path) r.metrics.UtilsCommandsDuration(ReconcilerName, "pvcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "pvcreate").Inc() - r.log.Debug(command) + log.Debug("CreatePV command", "command", command) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "pvcreate").Inc() - r.log.Error(err, fmt.Sprintf("[CreateVGComplex] unable to create PV by path %s", path)) + log.Error(err, "unable to create PV by path") return err } } - r.log.Debug(fmt.Sprintf("[CreateVGComplex] successfully created all PVs for the LVMVolumeGroup %s", lvg.Name)) - r.log.Debug(fmt.Sprintf("[CreateVGComplex] the LVMVolumeGroup %s type is %s", lvg.Name, lvg.Spec.Type)) + log.Debug("successfully created all PVs for the LVMVolumeGroup", "type", lvg.Spec.Type) switch lvg.Spec.Type { case internal.Local: start := time.Now() cmd, err := r.commands.CreateVGLocal(lvg.Spec.ActualVGNameOnTheNode, lvg.Name, paths) r.metrics.UtilsCommandsDuration(ReconcilerName, "vgcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgcreate").Inc() - r.log.Debug(cmd) + log.Debug("CreateVGLocal command", "command", cmd) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgcreate").Inc() - r.log.Error(err, "error CreateVGLocal") + log.Error(err, "error CreateVGLocal") return err } case internal.Shared: @@ -1278,15 +1370,15 @@ func (r *Reconciler) createVGComplex(lvg *v1alpha1.LVMVolumeGroup, blockDevices cmd, err := r.commands.CreateVGShared(lvg.Spec.ActualVGNameOnTheNode, lvg.Name, paths) r.metrics.UtilsCommandsDuration(ReconcilerName, "vgcreate").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgcreate").Inc() - r.log.Debug(cmd) + log.Debug("CreateVGShared command", "command", cmd) if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgcreate").Inc() - r.log.Error(err, "error CreateVGShared") + log.Error(err, "error CreateVGShared") return err } } - r.log.Debug(fmt.Sprintf("[CreateVGComplex] successfully create VG %s of the LVMVolumeGroup %s", lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log.Debug("successfully create VG of the LVMVolumeGroup") return nil } @@ -1296,12 +1388,15 @@ func (r *Reconciler) updateVGTagIfNeeded( lvg *v1alpha1.LVMVolumeGroup, vg internal.VGData, ) (bool, error) { + log := r.log.WithName("updateVGTagIfNeeded").WithValues("lvgName", lvg.Name, "vgName", vg.VGName) found, tagName := utils.ReadValueFromTags(vg.VGTags, internal.LVMVolumeGroupTag) if found && lvg.Name != tagName { if isApplied(lvg) { err := r.lvgCl.UpdateLVGConditionIfNeeded(ctx, lvg, v1.ConditionFalse, internal.TypeVGConfigurationApplied, internal.ReasonUpdating, "trying to apply the configuration") if err != nil { - r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add the condition %s status False reason %s to the LVMVolumeGroup %s", internal.TypeVGConfigurationApplied, internal.ReasonUpdating, lvg.Name)) + log.Error(err, "unable to add the condition status False reason to the LVMVolumeGroup", + "conditionType", internal.TypeVGConfigurationApplied, + "reason", internal.ReasonUpdating) return false, err } } @@ -1310,9 +1405,9 @@ func (r *Reconciler) updateVGTagIfNeeded( cmd, err := r.commands.VGChangeDelTag(vg.VGName, fmt.Sprintf("%s=%s", internal.LVMVolumeGroupTag, tagName)) r.metrics.UtilsCommandsDuration(ReconcilerName, 
"vgchange").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgchange").Inc() - r.log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) + log.Debug("VGChangeDelTag command", "command", cmd) if err != nil { - r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to delete LVMVolumeGroupTag: %s=%s, vg: %s", internal.LVMVolumeGroupTag, tagName, vg.VGName)) + log.Error(err, "unable to delete LVMVolumeGroupTag", "tagKey", internal.LVMVolumeGroupTag, "tagValue", tagName) r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgchange").Inc() return false, err } @@ -1321,9 +1416,9 @@ func (r *Reconciler) updateVGTagIfNeeded( cmd, err = r.commands.VGChangeAddTag(vg.VGName, fmt.Sprintf("%s=%s", internal.LVMVolumeGroupTag, lvg.Name)) r.metrics.UtilsCommandsDuration(ReconcilerName, "vgchange").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "vgchange").Inc() - r.log.Debug(fmt.Sprintf("[UpdateVGTagIfNeeded] exec cmd: %s", cmd)) + log.Debug("VGChangeAddTag command", "command", cmd) if err != nil { - r.log.Error(err, fmt.Sprintf("[UpdateVGTagIfNeeded] unable to add LVMVolumeGroupTag: %s=%s, vg: %s", internal.LVMVolumeGroupTag, lvg.Name, vg.VGName)) + log.Error(err, "unable to add LVMVolumeGroupTag", "tagKey", internal.LVMVolumeGroupTag, "tagValue", lvg.Name) r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "vgchange").Inc() return false, err } @@ -1335,32 +1430,34 @@ func (r *Reconciler) updateVGTagIfNeeded( } func (r *Reconciler) extendThinPool(lvg *v1alpha1.LVMVolumeGroup, specThinPool v1alpha1.LVMVolumeGroupThinPoolSpec) error { + log := r.log.WithName("extendThinPool").WithValues("lvgName", lvg.Name, "thinPoolName", specThinPool.Name) volumeGroupFreeSpaceBytes := lvg.Status.VGSize.Value() - lvg.Status.AllocatedSize.Value() tpRequestedSize, err := utils.GetRequestedSizeFromString(specThinPool.Size, lvg.Status.VGSize) if err != nil { return err } - r.log.Trace(fmt.Sprintf("[ExtendThinPool] volumeGroupSize = %s", lvg.Status.VGSize.String())) - r.log.Trace(fmt.Sprintf("[ExtendThinPool] volumeGroupAllocatedSize = %s", lvg.Status.AllocatedSize.String())) - r.log.Trace(fmt.Sprintf("[ExtendThinPool] volumeGroupFreeSpaceBytes = %d", volumeGroupFreeSpaceBytes)) + log.Trace("volume group sizes", + "vgSize", lvg.Status.VGSize, + "allocatedSize", lvg.Status.AllocatedSize, + "freeSpaceBytes", volumeGroupFreeSpaceBytes) - r.log.Info(fmt.Sprintf("[ExtendThinPool] start resizing thin pool: %s; with new size: %s", specThinPool.Name, tpRequestedSize.String())) + log.Info("start resizing thin pool", "newSize", tpRequestedSize) var cmd string start := time.Now() if utils.AreSizesEqualWithinDelta(tpRequestedSize, lvg.Status.VGSize, internal.ResizeDelta) { - r.log.Debug(fmt.Sprintf("[ExtendThinPool] thin-pool %s of the LVMVolumeGroup %s will be extend to size 100VG", specThinPool.Name, lvg.Name)) + log.Debug("thin-pool of the LVMVolumeGroup will be extend to size 100VG") cmd, err = r.commands.ExtendLVFullVGSpace(lvg.Spec.ActualVGNameOnTheNode, specThinPool.Name) } else { - r.log.Debug(fmt.Sprintf("[ExtendThinPool] thin-pool %s of the LVMVolumeGroup %s will be extend to size %s", specThinPool.Name, lvg.Name, tpRequestedSize.String())) + log.Debug("thin-pool of the LVMVolumeGroup will be extend to size", "size", tpRequestedSize) cmd, err = r.commands.ExtendLV(tpRequestedSize.Value(), lvg.Spec.ActualVGNameOnTheNode, specThinPool.Name) } r.metrics.UtilsCommandsDuration(ReconcilerName, 
"lvextend").Observe(r.metrics.GetEstimatedTimeInSeconds(start)) r.metrics.UtilsCommandsExecutionCount(ReconcilerName, "lvextend").Inc() if err != nil { r.metrics.UtilsCommandsErrorsCount(ReconcilerName, "lvextend").Inc() - r.log.Error(err, fmt.Sprintf("[ExtendThinPool] unable to extend LV, name: %s, cmd: %s", specThinPool.Name, cmd)) + log.Error(err, "unable to extend LV", "command", cmd) return err } diff --git a/images/agent/internal/logger/logger.go b/images/agent/internal/logger/logger.go index b60679674..8fbae83bd 100644 --- a/images/agent/internal/logger/logger.go +++ b/images/agent/internal/logger/logger.go @@ -17,7 +17,6 @@ limitations under the License. package logger import ( - "fmt" "strconv" "github.com/go-logr/logr" @@ -62,26 +61,38 @@ func NewLoggerWrap(log logr.Logger) Logger { return Logger{log: log} } +// WithName creates a new Logger instance with an additional name component. +// The name is used to identify the source of log messages. +func (l Logger) WithName(name string) Logger { + return NewLoggerWrap(l.GetLogger().WithName(name)) +} + +// WithValues creates a new Logger instance with additional key-value pairs. +// These key-value pairs will be included in all subsequent log messages from this logger. +func (l Logger) WithValues(keysAndValues ...any) Logger { + return NewLoggerWrap(l.GetLogger().WithValues(keysAndValues...)) +} + func (l Logger) GetLogger() logr.Logger { return l.log } func (l Logger) Error(err error, message string, keysAndValues ...interface{}) { - l.log.Error(err, fmt.Sprintf("ERROR %s", message), keysAndValues...) + l.log.WithValues("level", "ERROR").Error(err, message, keysAndValues...) } func (l Logger) Warning(message string, keysAndValues ...interface{}) { - l.log.V(warnLvl).Info(fmt.Sprintf("WARNING %s", message), keysAndValues...) + l.log.V(warnLvl).WithValues("level", "WARNING").Info(message, keysAndValues...) } func (l Logger) Info(message string, keysAndValues ...interface{}) { - l.log.V(infoLvl).Info(fmt.Sprintf("INFO %s", message), keysAndValues...) + l.log.V(infoLvl).WithValues("level", "INFO").Info(message, keysAndValues...) } func (l Logger) Debug(message string, keysAndValues ...interface{}) { - l.log.V(debugLvl).Info(fmt.Sprintf("DEBUG %s", message), keysAndValues...) + l.log.V(debugLvl).WithValues("level", "DEBUG").Info(message, keysAndValues...) } func (l Logger) Trace(message string, keysAndValues ...interface{}) { - l.log.V(traceLvl).Info(fmt.Sprintf("TRACE %s", message), keysAndValues...) + l.log.V(traceLvl).WithValues("level", "TRACE").Info(message, keysAndValues...) } diff --git a/images/agent/internal/mock_utils/block_device.go b/images/agent/internal/mock_utils/block_device.go index 07b0e5b3f..e2004465d 100644 --- a/images/agent/internal/mock_utils/block_device.go +++ b/images/agent/internal/mock_utils/block_device.go @@ -1,18 +1,18 @@ -/* -Copyright 2025 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ +// /* +// Copyright YEAR Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ // Code generated by MockGen. DO NOT EDIT. // Source: block_device.go @@ -29,9 +29,8 @@ import ( fs "io/fs" reflect "reflect" - gomock "go.uber.org/mock/gomock" - utils "github.com/deckhouse/sds-node-configurator/images/agent/internal/utils" + gomock "go.uber.org/mock/gomock" ) // MockDiscarder is a mock of Discarder interface. diff --git a/images/agent/internal/mock_utils/commands.go b/images/agent/internal/mock_utils/commands.go index 1542e337e..aa3c90489 100644 --- a/images/agent/internal/mock_utils/commands.go +++ b/images/agent/internal/mock_utils/commands.go @@ -1,25 +1,25 @@ -/* -Copyright 2025 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// /* +// Copyright YEAR Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ // Code generated by MockGen. DO NOT EDIT. // Source: commands.go // // Generated by this command: // -// mockgen -write_source_comment -destination=../mock_utils/commands.go -source=commands.go +// mockgen -write_source_comment -destination=../mock_utils/commands.go -source=commands.go -copyright_file=../../../../hack/boilerplate.txt // // Package mock_utils is a generated GoMock package. @@ -30,11 +30,10 @@ import ( context "context" reflect "reflect" - gomock "go.uber.org/mock/gomock" - internal "github.com/deckhouse/sds-node-configurator/images/agent/internal" logger "github.com/deckhouse/sds-node-configurator/images/agent/internal/logger" monitoring "github.com/deckhouse/sds-node-configurator/images/agent/internal/monitoring" + gomock "go.uber.org/mock/gomock" ) // MockCommands is a mock of Commands interface. 
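
For reference, the logger.go hunk above adds WithName and WithValues to the Logger wrapper, and the rest of this diff migrates call sites from fmt.Sprintf("[funcName] ...") message prefixes onto those helpers. A minimal sketch of the resulting call-site pattern follows; the package name example, reconcileSomething, and doWork are illustrative placeholders and not code from this repository.

package example // illustrative package, not part of this change set

import (
	"errors"

	"github.com/deckhouse/sds-node-configurator/images/agent/internal/logger"
)

// reconcileSomething is a hypothetical call site showing the migrated logging style.
func reconcileSomething(log logger.Logger, lvgName string) error {
	// Old style: log.Debug(fmt.Sprintf("[reconcileSomething] reconciling the LVMVolumeGroup %s", lvgName))
	// New style: the function name and resource name are attached once as structured
	// fields, and every message logged below inherits them.
	log = log.WithName("reconcileSomething").WithValues("lvgName", lvgName)

	log.Debug("reconciling the LVMVolumeGroup")

	if err := doWork(); err != nil {
		log.Error(err, "unable to reconcile the LVMVolumeGroup")
		return err
	}

	log.Info("successfully reconciled the LVMVolumeGroup")
	return nil
}

// doWork is a stub standing in for the real reconcile logic.
func doWork() error { return errors.New("not implemented") }

Since Warning, Info, Debug, and Trace now attach a "level" key-value pair instead of prefixing the message text, log consumers can filter on level and on fields such as lvgName without parsing the message string.
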
diff --git a/images/agent/internal/mock_utils/syscall.go b/images/agent/internal/mock_utils/syscall.go index 18ae434d1..b87c9c181 100644 --- a/images/agent/internal/mock_utils/syscall.go +++ b/images/agent/internal/mock_utils/syscall.go @@ -1,25 +1,25 @@ -/* -Copyright 2025 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// /* +// Copyright YEAR Flant JSC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// */ // Code generated by MockGen. DO NOT EDIT. // Source: syscall.go // // Generated by this command: // -// mockgen -write_source_comment -destination=../mock_utils/syscall.go -source=syscall.go +// mockgen -write_source_comment -destination=../mock_utils/syscall.go -source=syscall.go -copyright_file=../../../../hack/boilerplate.txt // // Package mock_utils is a generated GoMock package. @@ -28,9 +28,8 @@ package mock_utils import ( reflect "reflect" - gomock "go.uber.org/mock/gomock" - utils "github.com/deckhouse/sds-node-configurator/images/agent/internal/utils" + gomock "go.uber.org/mock/gomock" ) // MockSysCall is a mock of SysCall interface. 
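
For reference, the regenerated mocks above and the //go:generate changes further down (commands.go, syscall.go) pass -copyright_file=../../../../hack/boilerplate.txt to mockgen, which is why the mock headers switched to the commented-out "Copyright YEAR Flant JSC" template. A rough sketch of that wiring, assuming hack/boilerplate.txt simply holds the Apache 2.0 header text with the literal YEAR placeholder seen in the generated files:

// Placed in the source file next to the interfaces being mocked,
// as in commands.go and syscall.go later in this diff:
//
//go:generate go tool mockgen -write_source_comment -destination=../mock_utils/$GOFILE -source=$GOFILE -copyright_file=../../../../hack/boilerplate.txt
//
// Refreshing the mocks then copies the boilerplate header in verbatim, e.g.:
//
//	go generate ./images/agent/internal/utils/...
package utils

mockgen does not substitute the YEAR token itself, so the placeholder appears as-is in the generated headers until the boilerplate file (or a wrapper script) fills it in.
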
diff --git a/images/agent/internal/repository/client_llv.go b/images/agent/internal/repository/client_llv.go index 68ba9d7ed..061875fb1 100644 --- a/images/agent/internal/repository/client_llv.go +++ b/images/agent/internal/repository/client_llv.go @@ -18,7 +18,6 @@ package repository import ( "context" - "fmt" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,10 +47,11 @@ func (llvCl *LLVClient) UpdatePhaseIfNeeded( phase string, reason string, ) error { + log := llvCl.log.WithName("UpdatePhaseIfNeeded").WithValues("llvName", llv.Name) if llv.Status != nil && llv.Status.Phase == phase && llv.Status.Reason == reason { - llvCl.log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] no need to update the LVMLogicalVolume %s phase and reason", llv.Name)) + log.Debug("no need to update the LVMLogicalVolume phase and reason") return nil } @@ -62,13 +62,14 @@ func (llvCl *LLVClient) UpdatePhaseIfNeeded( llv.Status.Phase = phase llv.Status.Reason = reason - llvCl.log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] tries to update the LVMLogicalVolume %s status with phase: %s, reason: %s", llv.Name, phase, reason)) + log = log.WithValues("phase", phase, "reason", reason) + log.Debug("tries to update the LVMLogicalVolume status") err := llvCl.cl.Status().Update(ctx, llv) if err != nil { return err } - llvCl.log.Debug(fmt.Sprintf("[updateLVMLogicalVolumePhaseIfNeeded] updated LVMLogicalVolume %s status.phase to %s and reason to %s", llv.Name, phase, reason)) + log.Debug("updated LVMLogicalVolume status") return nil } @@ -77,6 +78,7 @@ func (llvCl *LLVClient) UpdatePhaseToCreatedIfNeeded( llv *v1alpha1.LVMLogicalVolume, actualSize resource.Quantity, ) error { + log := llvCl.log.WithName("UpdatePhaseToCreatedIfNeeded").WithValues("llvName", llv.Name) var contiguous *bool if llv.Spec.Thick != nil && llv.Spec.Thick.Contiguous != nil { if *llv.Spec.Thick.Contiguous { @@ -90,7 +92,7 @@ func (llvCl *LLVClient) UpdatePhaseToCreatedIfNeeded( llv.Status.Contiguous != contiguous if !updateNeeded { - llvCl.log.Info(fmt.Sprintf("[UpdatePhaseToCreatedIfNeeded] no need to update the LVMLogicalVolume %s", llv.Name)) + log.Info("no need to update the LVMLogicalVolume") return nil } @@ -100,10 +102,10 @@ func (llvCl *LLVClient) UpdatePhaseToCreatedIfNeeded( llv.Status.Contiguous = contiguous err := llvCl.cl.Status().Update(ctx, llv) if err != nil { - llvCl.log.Error(err, fmt.Sprintf("[UpdatePhaseToCreatedIfNeeded] unable to update the LVMLogicalVolume %s", llv.Name)) + log.Error(err, "unable to update the LVMLogicalVolume") return err } - llvCl.log.Info(fmt.Sprintf("[UpdatePhaseToCreatedIfNeeded] the LVMLogicalVolume %s was successfully updated", llv.Name)) + log.Info("the LVMLogicalVolume was successfully updated") return nil } diff --git a/images/agent/internal/repository/client_lvg.go b/images/agent/internal/repository/client_lvg.go index f942705c3..24e1fd8b4 100644 --- a/images/agent/internal/repository/client_lvg.go +++ b/images/agent/internal/repository/client_lvg.go @@ -18,7 +18,6 @@ package repository import ( "context" - "fmt" "time" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -73,6 +72,7 @@ func (lvgCl *LVGClient) UpdateLVGConditionIfNeeded( status v1.ConditionStatus, conType, reason, message string, ) error { + log := lvgCl.log.WithName("UpdateLVGConditionIfNeeded").WithValues("lvgName", lvg.Name, "conditionType", conType) exist := false index := 0 newCondition := v1.Condition{ @@ -85,38 +85,46 @@ func (lvgCl *LVGClient) UpdateLVGConditionIfNeeded( } 
if lvg.Status.Conditions == nil { - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] the LVMVolumeGroup %s conditions is nil. Initialize them", lvg.Name)) + log.Debug("the LVMVolumeGroup conditions is nil. Initialize them") lvg.Status.Conditions = make([]v1.Condition, 0, 5) } if len(lvg.Status.Conditions) > 0 { - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] there are some conditions in the LVMVolumeGroup %s. Tries to find a condition %s", lvg.Name, conType)) + log.Debug("there are some conditions in the LVMVolumeGroup. Tries to find a condition") for i, c := range lvg.Status.Conditions { if c.Type == conType { if checkIfEqualConditions(c, newCondition) { - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no need to update condition %s in the LVMVolumeGroup %s as new and old condition states are the same", conType, lvg.Name)) + log.Debug("no need to update condition as new and old condition states are the same") return nil } index = i exist = true - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was found in the LVMVolumeGroup %s at the index %d", conType, lvg.Name, i)) + log := log.WithValues("index", index) + log.Debug("a condition was found in the LVMVolumeGroup at the index") } } if !exist { - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was not found. Append it in the end of the LVMVolumeGroup %s conditions", conType, lvg.Name)) + log.Debug("a condition was not found. Append it in the end of the LVMVolumeGroup conditions") lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) } else { - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] insert the condition %s status %s reason %s message %s at index %d of the LVMVolumeGroup %s conditions", conType, status, reason, message, index, lvg.Name)) + log := log.WithValues("index", index) + log.Debug("insert the condition at index of the LVMVolumeGroup conditions", + "status", status, + "reason", reason, + "message", message) lvg.Status.Conditions[index] = newCondition } } else { - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no conditions were found in the LVMVolumeGroup %s. Append the condition %s in the end", lvg.Name, conType)) + log.Debug("no conditions were found in the LVMVolumeGroup. Append the condition in the end") lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) } - lvgCl.log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] tries to update the condition type %s status %s reason %s message %s of the LVMVolumeGroup %s", conType, status, reason, message, lvg.Name)) + log.Debug("tries to update the condition", + "status", status, + "reason", reason, + "message", message) return lvgCl.cl.Status().Update(ctx, lvg) } @@ -124,12 +132,13 @@ func (lvgCl *LVGClient) DeleteLVMVolumeGroup( ctx context.Context, lvg *v1alpha1.LVMVolumeGroup, ) error { - lvgCl.log.Debug(fmt.Sprintf(`[DeleteLVMVolumeGroup] Node "%s" does not belong to VG "%s". It will be removed from LVM resource, name "%s"'`, lvgCl.currentNodeName, lvg.Spec.ActualVGNameOnTheNode, lvg.Name)) + log := lvgCl.log.WithName("DeleteLVMVolumeGroup").WithValues("lvgName", lvg.Name, "nodeName", lvgCl.currentNodeName, "vgName", lvg.Spec.ActualVGNameOnTheNode) + log.Debug("Node does not belong to VG. It will be removed from LVM resource") for i, node := range lvg.Status.Nodes { if node.Name == lvgCl.currentNodeName { // delete node lvg.Status.Nodes = append(lvg.Status.Nodes[:i], lvg.Status.Nodes[i+1:]...) 
- lvgCl.log.Info(fmt.Sprintf(`[DeleteLVMVolumeGroup] deleted node "%s" from LVMVolumeGroup "%s"`, node.Name, lvg.Name)) + log.Info("deleted node from LVMVolumeGroup", "deletedNodeName", node.Name) } } @@ -143,7 +152,7 @@ func (lvgCl *LVGClient) DeleteLVMVolumeGroup( lvgCl.metrics.APIMethodsErrors(lvgCl.controllerName, "delete").Inc() return err } - lvgCl.log.Info(fmt.Sprintf("[DeleteLVMVolumeGroup] the LVMVolumeGroup %s deleted", lvg.Name)) + log.Info("the LVMVolumeGroup deleted") } return nil diff --git a/images/agent/internal/scanner/scanner.go b/images/agent/internal/scanner/scanner.go index ea3a2b580..67ffac610 100644 --- a/images/agent/internal/scanner/scanner.go +++ b/images/agent/internal/scanner/scanner.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "errors" - "fmt" "time" "github.com/pilebones/go-udev/netlink" @@ -62,16 +61,17 @@ func (s *scanner) Run( bdCtrl func(context.Context) (controller.Result, error), lvgDiscoverCtrl func(context.Context) (controller.Result, error), ) error { - log.Info("[RunScanner] starts the work") + log = log.WithName("RunScanner") + log.Info("starts the work") t := throttler.New(cfg.ThrottleInterval) conn := new(netlink.UEventConn) if err := conn.Connect(netlink.UdevEvent); err != nil { - log.Error(err, "[RunScanner] Failed to connect to Netlink") + log.Error(err, "Failed to connect to Netlink") return err } - log.Debug("[RunScanner] system socket connection succeeded") + log.Debug("system socket connection succeeded") errChan := make(chan error) eventChan := make(chan netlink.UEvent) @@ -86,7 +86,7 @@ func (s *scanner) Run( } quit := conn.Monitor(eventChan, errChan, matcher) - log.Info("[RunScanner] start to listen to events") + log.Info("start to listen to events") duration := 1 * time.Second timer := time.NewTimer(duration) @@ -94,62 +94,64 @@ func (s *scanner) Run( select { case device, open := <-eventChan: timer.Reset(duration) - log.Debug(fmt.Sprintf("[RunScanner] event triggered for device: %s", device.Env["DEVNAME"])) - log.Trace(fmt.Sprintf("[RunScanner] device from the event: %s", device.String())) + deviceName := device.Env["DEVNAME"] + log := log.WithValues("deviceName", deviceName) + log.Debug("event triggered for device") + log.Trace("device from the event", "device", device.String()) if !open { err := errors.New("EventChan has been closed when monitor udev event") - log.Error(err, "[RunScanner] unable to read from the event channel") + log.Error(err, "unable to read from the event channel") return err } t.Do(func() { - log.Info("[RunScanner] start to fill the cache") + log.Info("start to fill the cache") err := s.fillTheCache(ctx, log, sdsCache, cfg) if err != nil { - log.Error(err, "[RunScanner] unable to fill the cache. Retry") + log.Error(err, "unable to fill the cache. 
Retry") go func() { eventChan <- device }() return } - log.Info("[RunScanner] successfully filled the cache") + log.Info("successfully filled the cache") err = runControllersReconcile(ctx, log, bdCtrl, lvgDiscoverCtrl) if err != nil { - log.Error(err, "[RunScanner] unable to run controllers reconciliations") + log.Error(err, "unable to run controllers reconciliations") } - log.Info("[RunScanner] successfully ran the controllers reconcile funcs") + log.Info("successfully ran the controllers reconcile funcs") }) case err := <-errChan: - log.Error(err, "[RunScanner] Monitor udev event error") + log.Error(err, "Monitor udev event error") quit = conn.Monitor(eventChan, errChan, matcher) timer.Reset(duration) continue case <-quit: err := errors.New("receive quit signal when monitor udev event") - log.Error(err, "[RunScanner] unable to read from the event channel") + log.Error(err, "unable to read from the event channel") return err case <-timer.C: - log.Info("[RunScanner] events ran out. Start to fill the cache") + log.Info("events ran out. Start to fill the cache") err := s.fillTheCache(ctx, log, sdsCache, cfg) if err != nil { - log.Error(err, "[RunScanner] unable to fill the cache after all events passed. Retry") + log.Error(err, "unable to fill the cache after all events passed. Retry") timer.Reset(duration) continue } - log.Info("[RunScanner] successfully filled the cache after all events passed") + log.Info("successfully filled the cache after all events passed") err = runControllersReconcile(ctx, log, bdCtrl, lvgDiscoverCtrl) if err != nil { - log.Error(err, "[RunScanner] unable to run controllers reconciliations") + log.Error(err, "unable to run controllers reconciliations") } - log.Info("[RunScanner] successfully ran the controllers reconcile funcs") + log.Info("successfully ran the controllers reconcile funcs") } } } @@ -160,97 +162,102 @@ func runControllersReconcile( bdCtrl func(context.Context) (controller.Result, error), lvgDiscoverCtrl func(context.Context) (controller.Result, error), ) error { - log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", bd.DiscovererName)) + log = log.WithName("runControllersReconcile") + log = log.WithValues("discovererName", bd.DiscovererName) + log.Info("run reconcile") bdRes, err := bdCtrl(ctx) if err != nil { - log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", bd.DiscovererName)) + log.Error(err, "an error occurred while reconcile") return err } if bdRes.RequeueAfter > 0 { go func() { for bdRes.RequeueAfter > 0 { - log.Warning(fmt.Sprintf("[runControllersReconcile] BlockDevices reconcile needs a retry in %s", bdRes.RequeueAfter.String())) + log.Warning("BlockDevices reconcile needs a retry", "retryIn", bdRes.RequeueAfter) time.Sleep(bdRes.RequeueAfter) bdRes, err = bdCtrl(ctx) } - log.Info("[runControllersReconcile] successfully reconciled BlockDevices after a retry") + log.Info("successfully reconciled BlockDevices after a retry") }() } - log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", bd.DiscovererName)) + log.Info("run successfully reconciled") - log.Info(fmt.Sprintf("[runControllersReconcile] run %s reconcile", lvg.DiscovererName)) + log = log.WithValues("discovererName", lvg.DiscovererName) + log.Info("run reconcile") lvgRes, err := lvgDiscoverCtrl(ctx) if err != nil { - log.Error(err, fmt.Sprintf("[runControllersReconcile] an error occurred while %s reconcile", lvg.DiscovererName)) + log.Error(err, "an error occurred while reconcile") return err } if 
lvgRes.RequeueAfter > 0 { go func() { for lvgRes.RequeueAfter > 0 { - log.Warning(fmt.Sprintf("[runControllersReconcile] LVMVolumeGroups reconcile needs a retry in %s", lvgRes.RequeueAfter.String())) + log.Warning("LVMVolumeGroups reconcile needs a retry", "retryIn", lvgRes.RequeueAfter) time.Sleep(lvgRes.RequeueAfter) lvgRes, err = lvgDiscoverCtrl(ctx) } - log.Info("[runControllersReconcile] successfully reconciled LVMVolumeGroups after a retry") + log.Info("successfully reconciled LVMVolumeGroups after a retry") }() } - log.Info(fmt.Sprintf("[runControllersReconcile] run %s successfully reconciled", lvg.DiscovererName)) + log.Info("run successfully reconciled") return nil } func (s *scanner) fillTheCache(ctx context.Context, log logger.Logger, cache *cache.Cache, cfg config.Config) error { + log = log.WithName("fillTheCache") // the scan operations order is very important as it guarantees the consistent and reliable data from the node realClock := clock.RealClock{} now := time.Now() lvs, lvsErr, err := s.scanLVs(ctx, log, cfg) - log.Trace(fmt.Sprintf("[fillTheCache] LVS command runs for: %s", realClock.Since(now).String())) + log.Trace("LVS command runs", "duration", realClock.Since(now)) if err != nil { return err } now = time.Now() vgs, vgsErr, err := s.scanVGs(ctx, log, cfg) - log.Trace(fmt.Sprintf("[fillTheCache] VGS command runs for: %s", realClock.Since(now).String())) + log.Trace("VGS command runs", "duration", realClock.Since(now)) if err != nil { return err } now = time.Now() pvs, pvsErr, err := s.scanPVs(ctx, log, cfg) - log.Trace(fmt.Sprintf("[fillTheCache] PVS command runs for: %s", realClock.Since(now).String())) + log.Trace("PVS command runs", "duration", realClock.Since(now)) if err != nil { return err } now = time.Now() devices, devErr, err := s.scanDevices(ctx, log, cfg) - log.Trace(fmt.Sprintf("[fillTheCache] LSBLK command runs for: %s", realClock.Since(now).String())) + log.Trace("LSBLK command runs", "duration", realClock.Since(now)) if err != nil { return err } - log.Debug("[fillTheCache] successfully scanned entities. Starts to fill the cache") + log.Debug("successfully scanned entities. 
Starts to fill the cache") cache.StoreDevices(devices, devErr) cache.StorePVs(pvs, pvsErr) cache.StoreVGs(vgs, vgsErr) cache.StoreLVs(lvs, lvsErr) - log.Debug("[fillTheCache] successfully filled the cache") + log.Debug("successfully filled the cache") cache.PrintTheCache(log) return nil } func (s *scanner) scanDevices(ctx context.Context, log logger.Logger, cfg config.Config) ([]internal.Device, bytes.Buffer, error) { + log = log.WithName("ScanDevices") ctx, cancel := context.WithTimeout(ctx, cfg.CmdDeadlineDuration) defer cancel() devices, cmdStr, stdErr, err := s.commands.GetBlockDevices(ctx) if err != nil { - log.Error(err, fmt.Sprintf("[ScanDevices] unable to scan the devices, cmd: %s", cmdStr)) + log.Error(err, "unable to scan the devices", "command", cmdStr) return nil, stdErr, err } @@ -258,11 +265,12 @@ func (s *scanner) scanDevices(ctx context.Context, log logger.Logger, cfg config } func (s *scanner) scanPVs(ctx context.Context, log logger.Logger, cfg config.Config) ([]internal.PVData, bytes.Buffer, error) { + log = log.WithName("ScanPVs") ctx, cancel := context.WithTimeout(ctx, cfg.CmdDeadlineDuration) defer cancel() pvs, cmdStr, stdErr, err := s.commands.GetAllPVs(ctx) if err != nil { - log.Error(err, fmt.Sprintf("[ScanPVs] unable to scan the PVs, cmd: %s", cmdStr)) + log.Error(err, "unable to scan the PVs", "command", cmdStr) return nil, stdErr, err } @@ -270,11 +278,12 @@ func (s *scanner) scanPVs(ctx context.Context, log logger.Logger, cfg config.Con } func (s *scanner) scanVGs(ctx context.Context, log logger.Logger, cfg config.Config) ([]internal.VGData, bytes.Buffer, error) { + log = log.WithName("ScanVGs") ctx, cancel := context.WithTimeout(ctx, cfg.CmdDeadlineDuration) defer cancel() vgs, cmdStr, stdErr, err := s.commands.GetAllVGs(ctx) if err != nil { - log.Error(err, fmt.Sprintf("[ScanVGs] unable to scan the VGs, cmd: %s", cmdStr)) + log.Error(err, "unable to scan the VGs", "command", cmdStr) return nil, stdErr, err } @@ -282,11 +291,12 @@ func (s *scanner) scanVGs(ctx context.Context, log logger.Logger, cfg config.Con } func (s *scanner) scanLVs(ctx context.Context, log logger.Logger, cfg config.Config) ([]internal.LVData, bytes.Buffer, error) { + log = log.WithName("ScanLVs") ctx, cancel := context.WithTimeout(ctx, cfg.CmdDeadlineDuration) defer cancel() lvs, cmdStr, stdErr, err := s.commands.GetAllLVs(ctx) if err != nil { - log.Error(err, fmt.Sprintf("[ScanLVs] unable to scan LVs, cmd: %s", cmdStr)) + log.Error(err, "unable to scan LVs", "command", cmdStr) return nil, stdErr, err } diff --git a/images/agent/internal/utils/commands.go b/images/agent/internal/utils/commands.go index e135479b6..605c42bb1 100644 --- a/images/agent/internal/utils/commands.go +++ b/images/agent/internal/utils/commands.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -//go:generate go tool mockgen -write_source_comment -destination=../mock_utils/$GOFILE -source=$GOFILE +//go:generate go tool mockgen -write_source_comment -destination=../mock_utils/$GOFILE -source=$GOFILE -copyright_file=../../../../hack/boilerplate.txt package utils @@ -547,16 +547,17 @@ func (commands) UnmarshalDevices(out []byte) ([]internal.Device, error) { } func (c *commands) ReTag(ctx context.Context, log logger.Logger, metrics monitoring.Metrics, ctrlName string) error { + log = log.WithName("ReTag") // thin pool - log.Debug("[ReTag] start re-tagging LV") + log.Debug("start re-tagging LV") start := time.Now() lvs, cmdStr, _, err := c.GetAllLVs(ctx) metrics.UtilsCommandsDuration(ctrlName, "lvs").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(ctrlName, "lvs").Inc() - log.Debug(fmt.Sprintf("[ReTag] exec cmd: %s", cmdStr)) + log.Debug("exec cmd", "command", cmdStr) if err != nil { metrics.UtilsCommandsErrorsCount(ctrlName, "lvs").Inc() - log.Error(err, "[ReTag] unable to GetAllLVs") + log.Error(err, "unable to GetAllLVs") return err } @@ -568,14 +569,15 @@ func (c *commands) ReTag(ctx context.Context, log logger.Logger, metrics monitor } if strings.Contains(tag, internal.LVMTags[1]) { + log := log.WithValues("lvName", lv.LVName, "vgName", lv.VGName, "tag", tag) start = time.Now() cmdStr, err = c.LVChangeDelTag(lv, tag) metrics.UtilsCommandsDuration(ctrlName, "lvchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(ctrlName, "lvchange").Inc() - log.Debug(fmt.Sprintf("[ReTag] exec cmd: %s", cmdStr)) + log.Debug("exec cmd", "command", cmdStr) if err != nil { metrics.UtilsCommandsErrorsCount(ctrlName, "lvchange").Inc() - log.Error(err, "[ReTag] unable to LVChangeDelTag") + log.Error(err, "unable to LVChangeDelTag") return err } @@ -583,26 +585,26 @@ func (c *commands) ReTag(ctx context.Context, log logger.Logger, metrics monitor cmdStr, err = c.VGChangeAddTag(lv.VGName, internal.LVMTags[0]) metrics.UtilsCommandsDuration(ctrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(ctrlName, "vgchange").Inc() - log.Debug(fmt.Sprintf("[ReTag] exec cmd: %s", cmdStr)) + log.Debug("exec cmd", "command", cmdStr) if err != nil { metrics.UtilsCommandsErrorsCount(ctrlName, "vgchange").Inc() - log.Error(err, "[ReTag] unable to VGChangeAddTag") + log.Error(err, "unable to VGChangeAddTag") return err } } } } - log.Debug("[ReTag] end re-tagging LV") + log.Debug("end re-tagging LV") - log.Debug("[ReTag] start re-tagging LVM") + log.Debug("start re-tagging LVM") start = time.Now() vgs, cmdStr, _, err := c.GetAllVGs(ctx) metrics.UtilsCommandsDuration(ctrlName, "vgs").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(ctrlName, "vgs").Inc() - log.Debug(fmt.Sprintf("[ReTag] exec cmd: %s", cmdStr)) + log.Debug("exec cmd", "command", cmdStr) if err != nil { metrics.UtilsCommandsErrorsCount(ctrlName, cmdStr).Inc() - log.Error(err, "[ReTag] unable to GetAllVGs") + log.Error(err, "unable to GetAllVGs") return err } @@ -614,14 +616,15 @@ func (c *commands) ReTag(ctx context.Context, log logger.Logger, metrics monitor } if strings.Contains(tag, internal.LVMTags[1]) { + log := log.WithValues("vgName", vg.VGName, "tag", tag) start = time.Now() cmdStr, err = c.VGChangeDelTag(vg.VGName, tag) metrics.UtilsCommandsDuration(ctrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(ctrlName, "vgchange").Inc() 
- log.Debug(fmt.Sprintf("[ReTag] exec cmd: %s", cmdStr)) + log.Debug("exec cmd", "command", cmdStr) if err != nil { metrics.UtilsCommandsErrorsCount(ctrlName, "vgchange").Inc() - log.Error(err, "[ReTag] unable to VGChangeDelTag") + log.Error(err, "unable to VGChangeDelTag") return err } @@ -629,16 +632,16 @@ func (c *commands) ReTag(ctx context.Context, log logger.Logger, metrics monitor cmdStr, err = c.VGChangeAddTag(vg.VGName, internal.LVMTags[0]) metrics.UtilsCommandsDuration(ctrlName, "vgchange").Observe(metrics.GetEstimatedTimeInSeconds(start)) metrics.UtilsCommandsExecutionCount(ctrlName, "vgchange").Inc() - log.Debug(fmt.Sprintf("[ReTag] exec cmd: %s", cmdStr)) + log.Debug("exec cmd", "command", cmdStr) if err != nil { metrics.UtilsCommandsErrorsCount(ctrlName, "vgchange").Inc() - log.Error(err, "[ReTag] unable to VGChangeAddTag") + log.Error(err, "unable to VGChangeAddTag") return err } } } } - log.Debug("[ReTag] stop re-tagging LVM") + log.Debug("stop re-tagging LVM") return nil } diff --git a/images/agent/internal/utils/commands_ee.go b/images/agent/internal/utils/commands_ee.go index a8ca8ff27..45da777ea 100644 --- a/images/agent/internal/utils/commands_ee.go +++ b/images/agent/internal/utils/commands_ee.go @@ -19,14 +19,15 @@ import ( ) func ThinDumpRaw(ctx context.Context, log logger.Logger, tpool, tmeta, devID string) (out []byte, err error) { - log.Trace(fmt.Sprintf("[ThinDumpRaw] calling for tpool %s tmeta %s devID %s", tpool, tmeta, devID)) + log = log.WithName("ThinDumpRaw").WithValues("tpool", tpool, "tmeta", tmeta, "devID", devID) + log.Trace("calling for tpool tmeta devID") cmd := exec.CommandContext( ctx, internal.NSENTERCmd, nsentrerExpendedArgs(internal.DMSetupCmd, "message", tpool, "0", "reserve_metadata_snap")...) - log.Debug(fmt.Sprintf("[ThinDumpRaw] running %v", cmd)) + log.Debug("running command", "command", cmd.String()) if err = cmd.Run(); err != nil { - log.Error(err, fmt.Sprintf("[ThinDumpRaw] can't reserve metadata snapshot for %s", tpool)) + log.Error(err, "can't reserve metadata snapshot") err = fmt.Errorf("reserving metadata snapshot: %w", err) return } @@ -36,9 +37,9 @@ func ThinDumpRaw(ctx context.Context, log logger.Logger, tpool, tmeta, devID str internal.NSENTERCmd, nsentrerExpendedArgs(internal.DMSetupCmd, "message", tpool, "0", "release_metadata_snap")...) 
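Two details of this ThinDumpRaw hunk are worth calling out: the command is logged as a structured value (cmd.String() gives a predictable rendering of the command line regardless of the log sink), and the reserved metadata snapshot is released in a defer whose failure is folded into the named return error with errors.Join. The sketch below is a hedged illustration of that shape, not the project's code; the dmsetup arguments mirror the diff, while the function name and direct logr usage are assumptions.

// Illustrative sketch: reserve a dm metadata snapshot, always release it, keep both errors.
package thindump

import (
	"context"
	"errors"
	"fmt"
	"os/exec"

	"github.com/go-logr/logr"
)

func withReservedMetadataSnap(ctx context.Context, log logr.Logger, tpool string, use func() error) (err error) {
	reserve := exec.CommandContext(ctx, "dmsetup", "message", tpool, "0", "reserve_metadata_snap")
	// Log the rendered command line, not the *exec.Cmd struct itself.
	log.Info("running command", "command", reserve.String())
	if err = reserve.Run(); err != nil {
		return fmt.Errorf("reserving metadata snapshot: %w", err)
	}

	defer func() {
		release := exec.CommandContext(ctx, "dmsetup", "message", tpool, "0", "release_metadata_snap")
		log.Info("running command", "command", release.String())
		if errRelease := release.Run(); errRelease != nil {
			// Fold the release failure into the named return so it is never silently dropped.
			err = errors.Join(err, fmt.Errorf("releasing metadata snapshot: %w", errRelease))
		}
	}()

	return use()
}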
- log.Debug(fmt.Sprintf("[ThinDumpRaw] running %v", cmd)) + log.Debug("running command", "command", cmd) if errRelease := cmd.Run(); errRelease != nil { - log.Error(errRelease, fmt.Sprintf("[ThinDumpRaw] can't release metadata snapshot for %s", tpool)) + log.Error(errRelease, "can't release metadata snapshot") err = errors.Join(err, errRelease) } }() @@ -52,12 +53,12 @@ func ThinDumpRaw(ctx context.Context, log logger.Logger, tpool, tmeta, devID str var output bytes.Buffer cmd.Stdout = &output - log.Debug(fmt.Sprintf("[ThinDumpRaw] running %v", cmd)) + log.Debug("running command", "command", cmd) if err = cmd.Run(); err != nil { - log.Error(err, fmt.Sprintf("[ThinDumpRaw] can't get metadata %s", tmeta)) + log.Error(err, "can't get metadata") err = fmt.Errorf("dumping metadata: %w", err) return } - log.Trace(fmt.Sprintf("[ThinDumpRaw] device map is: %s", output.Bytes())) + log.Trace("device map", "output", output) return output.Bytes(), nil } diff --git a/images/agent/internal/utils/syscall.go b/images/agent/internal/utils/syscall.go index 69b81b73a..52e3f8159 100644 --- a/images/agent/internal/utils/syscall.go +++ b/images/agent/internal/utils/syscall.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -//go:generate go tool mockgen -write_source_comment -destination=../mock_utils/$GOFILE -source=$GOFILE +//go:generate go tool mockgen -write_source_comment -destination=../mock_utils/$GOFILE -source=$GOFILE -copyright_file=../../../../hack/boilerplate.txt package utils import ( diff --git a/images/agent/internal/utils/thin_dump_ee.go b/images/agent/internal/utils/thin_dump_ee.go index 3686f8d94..b4299cb1d 100644 --- a/images/agent/internal/utils/thin_dump_ee.go +++ b/images/agent/internal/utils/thin_dump_ee.go @@ -61,33 +61,36 @@ type SingleMapping struct { } func ThinDump(ctx context.Context, log logger.Logger, tpool, tmeta, devID string) (superblock Superblock, err error) { - log.Trace(fmt.Sprintf("[ThinDump] calling for tpool %s tmeta %s devID %s", tpool, tmeta, devID)) + log = log.WithName("ThinDump").WithValues("tpool", tpool, "tmeta", tmeta, "devID", devID) var rawOut []byte rawOut, err = ThinDumpRaw(ctx, log, tpool, tmeta, devID) if err != nil { + log.Error(err, "Calling ThinDumpRaw") return } - log.Debug("[ThinDump] unmarshaling") + log.Debug("unmarshaling") if err = xml.Unmarshal(rawOut, &superblock); err != nil { - log.Error(err, "[ThinDump] unmarshaling error") + log.Error(err, "unmarshaling error") err = fmt.Errorf("parsing metadata: %w", err) return } - log.Trace(fmt.Sprintf("[ThinDump] unmarshaled: %v", superblock)) + log.Trace("unmarshaled", "superblock", superblock) return superblock, nil } func ThinVolumeUsedRanges(_ context.Context, log logger.Logger, superblock Superblock, deviceID LVMThinDeviceID) (blockRanges RangeCover, err error) { - log.Trace(fmt.Sprintf("[ThinVolumeUsedRanges] calling for deviceId %d", deviceID)) + log = log.WithName("ThinVolumeUsedRanges").WithValues("deviceID", deviceID) + log.Trace("calling for deviceId") for _, device := range superblock.Devices { if device.DevID != deviceID { continue } + log := log.WithValues("devID", device.DevID) blockRanges = make(RangeCover, 0, len(device.RangeMappings)+len(device.SingleMappings)) for _, mapping := range device.RangeMappings { @@ -101,6 +104,7 @@ func ThinVolumeUsedRanges(_ context.Context, log logger.Logger, superblock Super blockRanges, err = blockRanges.Merged() if err != nil { err = fmt.Errorf("finding used ranges: %w", err) + 
log.Error(err, "merging ranges") return } diff --git a/images/agent/internal/utils/volume_cleanup_ee.go b/images/agent/internal/utils/volume_cleanup_ee.go index f04b76457..77d6fac37 100644 --- a/images/agent/internal/utils/volume_cleanup_ee.go +++ b/images/agent/internal/utils/volume_cleanup_ee.go @@ -24,10 +24,11 @@ import ( ) func VolumeCleanup(ctx context.Context, log logger.Logger, sdsCache *cache.Cache, lv *cache.LVData, volumeCleanup string) (shouldRequeue bool, err error) { + log = log.WithName("VolumeCleanup") vgName := lv.Data.VGName lvName := lv.Data.LVName - log.Debug("[deleteLVIfNeeded] finding used blocks") + log.Debug("finding used blocks") usedBlockRanges, err := UsedBlockRangeForThinVolume(ctx, log, sdsCache, lv) if err != nil { @@ -41,7 +42,8 @@ func VolumeCleanup(ctx context.Context, log logger.Logger, sdsCache *cache.Cache } func VolumeCleanupWithRangeCover(ctx context.Context, log logger.Logger, deviceOpener BlockDeviceOpener, vgName string, lvName, volumeCleanup string, usedBlockRanges *RangeCover) error { - log.Trace(fmt.Sprintf("[VolumeCleanup] cleaning up volume %s in volume group %s using %s with block ranges %v", lvName, vgName, volumeCleanup, usedBlockRanges)) + log = log.WithName("VolumeCleanupWithRangeCover").WithValues("lvName", lvName, "vgName", vgName, "volumeCleanup", volumeCleanup) + log.Trace("cleaning up volume in volume group using with block ranges", "usedBlockRanges", usedBlockRanges) devicePath := filepath.Join("/dev", vgName, lvName) randomSource := "/dev/urandom" @@ -60,7 +62,7 @@ func VolumeCleanupWithRangeCover(ctx context.Context, log logger.Logger, deviceO } if err != nil { - log.Error(err, fmt.Sprintf("[VolumeCleanup] fail to cleanup volume %s", devicePath)) + log.Error(err, "fail to cleanup volume", "devicePath", devicePath) return fmt.Errorf("cleaning volume %s: %w", devicePath, err) } @@ -68,30 +70,32 @@ func VolumeCleanupWithRangeCover(ctx context.Context, log logger.Logger, deviceO } func UsedBlockRangeForThinVolume(ctx context.Context, log logger.Logger, sdsCache *cache.Cache, lv *cache.LVData) (*RangeCover, error) { + log = log.WithName("UsedBlockRangeForThinVolume") if lv.Data.PoolName == "" { return nil, nil } vgName := lv.Data.VGName lvName := lv.Data.LVName + log = log.WithValues("lvName", lvName, "vgName", vgName) tpool, poolMetadataMapper, err := sdsCache.FindThinPoolMappers(lv) if err != nil { err = fmt.Errorf("finding mappers for thin pool %s: %w", lv.Data.PoolName, err) - log.Error(err, fmt.Sprintf("[UsedBlockRangeForThinVolume] can't find pool for LV %s in VG %s", lvName, vgName)) + log.Error(err, "can't find pool for LV in VG") return nil, err } - log.Debug(fmt.Sprintf("[UsedBlockRangeForThinVolume] tpool %s tmeta %s", tpool, poolMetadataMapper)) + log.Debug("tpool tmeta", "tpool", tpool, "tmeta", poolMetadataMapper) if lv.Data.ThinID == "" { err = fmt.Errorf("missing deviceId for thin volume %s", lvName) - log.Error(err, fmt.Sprintf("[UsedBlockRangeForThinVolume] can't find pool for LV %s in VG %s", lvName, vgName)) + log.Error(err, "can't find pool for LV in VG") return nil, err } superblock, err := ThinDump(ctx, log, tpool, poolMetadataMapper, lv.Data.ThinID) if err != nil { err = fmt.Errorf("dumping thin pool map: %w", err) - log.Error(err, fmt.Sprintf("[UsedBlockRangeForThinVolume] can't find pool map for LV %s in VG %s", lvName, vgName)) + log.Error(err, "can't find pool map for LV in VG") return nil, err } thinID, err := strconv.Atoi(lv.Data.ThinID) @@ -99,37 +103,42 @@ func UsedBlockRangeForThinVolume(ctx 
context.Context, log logger.Logger, sdsCach err = fmt.Errorf("deviceId %s is not a number: %w", lv.Data.ThinID, err) return nil, err } - log.Debug(fmt.Sprintf("[UsedBlockRangeForThinVolume] ThinID %d", thinID)) + log.Debug("ThinID", "thinID", thinID) blockRanges, err := ThinVolumeUsedRanges(ctx, log, superblock, LVMThinDeviceID(thinID)) if err != nil { err = fmt.Errorf("finding used ranges for deviceId %d in thin pool %s: %w", thinID, lv.Data.PoolName, err) return nil, err } - log.Debug(fmt.Sprintf("[UsedBlockRangeForThinVolume] ranges %v", blockRanges)) + log.Debug("ranges", "blockRanges", blockRanges) return &blockRanges, nil } func volumeCleanupOverwrite(_ context.Context, log logger.Logger, deviceOpener BlockDeviceOpener, devicePath, inputPath string, passes int, usedBlockRanges *RangeCover) (err error) { - log.Trace(fmt.Sprintf("[volumeCleanupOverwrite] overwriting %s by %s in %d passes", devicePath, inputPath, passes)) + log = log.WithName("volumeCleanupOverwrite").WithValues( + "devicePath", devicePath, + "inputPath", inputPath, + "passes", passes) + log.Trace("overwriting by in passes") + closeFile := func(file BlockDevice) { - log.Trace(fmt.Sprintf("[volumeCleanupOverwrite] closing %s", file.Name())) + log.Trace("closing", "fileName", file.Name()) closingErr := file.Close() if closingErr != nil { - log.Error(closingErr, fmt.Sprintf("[volumeCleanupOverwrite] While closing file %s", file.Name())) + log.Error(closingErr, "While closing file", "fileName", file.Name()) err = errors.Join(err, fmt.Errorf("closing file %s: %w", file.Name(), closingErr)) } } input, err := deviceOpener.Open(inputPath, unix.O_RDONLY) if err != nil { - log.Error(err, fmt.Sprintf("[volumeCleanupOverwrite] Opening file %s", inputPath)) + log.Error(err, "Opening file") return fmt.Errorf("opening source device %s to wipe: %w", inputPath, err) } defer closeFile(input) output, err := deviceOpener.Open(devicePath, unix.O_DIRECT|unix.O_RDWR) if err != nil { - log.Error(err, fmt.Sprintf("[volumeCleanupOverwrite] Opening file %s", devicePath)) + log.Error(err, "Opening file") return fmt.Errorf("opening device %s to wipe: %w", devicePath, err) } defer closeFile(output) @@ -139,43 +148,46 @@ func volumeCleanupOverwrite(_ context.Context, log logger.Logger, deviceOpener B if usedBlockRanges == nil { size, err := output.Size() if err != nil { - log.Error(err, "[volumeCleanupOverwrite] Finding volume size") + log.Error(err, "Finding volume size") return fmt.Errorf("can't find the size of device %s: %w", devicePath, err) } - log.Debug(fmt.Sprintf("[volumeCleanupOverwrite] device size is %d. Overwriting whole device.", size)) + log.Debug("device size. 
Overwriting whole device", "size", size) usedByteRanges = RangeCover{Range{Start: 0, Count: size}} } else { blockSize, err := output.BlockSize() if err != nil { - log.Error(err, "[volumeCleanupOverwrite] Finding block size") + log.Error(err, "Finding block size") return fmt.Errorf("can't find the block size of device %s: %w", devicePath, err) } - log.Debug(fmt.Sprintf("[volumeCleanupOverwrite] device block size is %d", blockSize)) + log.Debug("device block size", "blockSize", blockSize) usedByteRanges = usedBlockRanges.Multiplied(int64(blockSize)) } - log.Debug(fmt.Sprintf("[volumeCleanupOverwrite] overwriting byte ranges %v", usedByteRanges)) + log.Debug("overwriting byte ranges", "usedByteRanges", usedByteRanges) bufferSize := 1024 * 1024 * 4 buffer := make([]byte, bufferSize) for pass := 0; pass < passes; pass++ { for _, usedByteRange := range usedByteRanges { bytesToWrite := usedByteRange.Count - log.Debug(fmt.Sprintf("[volumeCleanupOverwrite] Overwriting %d bytes with offset %d. Pass %d", bytesToWrite, usedByteRange.Start, pass)) + log.Debug("Overwriting bytes with offset. Pass", + "bytesToWrite", bytesToWrite, + "offset", usedByteRange.Start, + "pass", pass) start := time.Now() written, err := io.CopyBuffer( io.NewOffsetWriter(output, usedByteRange.Start), io.LimitReader(input, bytesToWrite), buffer) - log.Info(fmt.Sprintf("[volumeCleanupOverwrite] Overwriting is done in %s", time.Since(start).String())) + log.Info("Overwriting is done", "duration", time.Since(start)) if err != nil { - log.Error(err, fmt.Sprintf("[volumeCleanupOverwrite] copying from %s to %s", inputPath, devicePath)) + log.Error(err, "copying from to") return fmt.Errorf("copying from %s to %s: %w", inputPath, devicePath, err) } if written != bytesToWrite { - log.Error(err, fmt.Sprintf("[volumeCleanupOverwrite] only %d bytes written, expected %d", written, bytesToWrite)) + log.Error(err, "only bytes written, expected", "written", written, "expected", bytesToWrite) return fmt.Errorf("only %d bytes written, expected %d", written, bytesToWrite) } } @@ -185,31 +197,32 @@ func volumeCleanupOverwrite(_ context.Context, log logger.Logger, deviceOpener B } func volumeCleanupDiscard(_ context.Context, log logger.Logger, deviceOpener BlockDeviceOpener, devicePath string) (err error) { - log.Trace(fmt.Sprintf("[volumeCleanupDiscard] discarding %s", devicePath)) + log = log.WithName("volumeCleanupDiscard").WithValues("devicePath", devicePath) + log.Trace("discarding") device, err := deviceOpener.Open(devicePath, unix.O_RDWR) if err != nil { - log.Error(err, fmt.Sprintf("[volumeCleanupDiscard] Opening device %s", devicePath)) + log.Error(err, "Opening device") return fmt.Errorf("opening device %s to wipe: %w", devicePath, err) } defer func() { - log.Trace(fmt.Sprintf("Closing file %s", devicePath)) + log.Trace("Closing file") closingErr := device.Close() if closingErr != nil { - log.Error(closingErr, fmt.Sprintf("[volumeCleanupDiscard] While closing deice %s", devicePath)) + log.Error(closingErr, "While closing device") err = errors.Join(err, fmt.Errorf("closing file %s: %w", device.Name(), closingErr)) } }() deviceSize, err := device.Size() if err != nil { - log.Error(err, fmt.Sprintf("[volumeCleanupDiscard] can't find the size of device %s", devicePath)) + log.Error(err, "can't find the size of device") return fmt.Errorf("can't find the size of device %s: %w", devicePath, err) } start := time.Now() - log.Debug(fmt.Sprintf("[volumeCleanupDiscard] Discarding all %d bytes", deviceSize)) + log.Debug("Discarding all bytes", 
"deviceSize", deviceSize) defer func() { - log.Info(fmt.Sprintf("[volumeCleanupDiscard] Discarding is done in %s", time.Since(start).String())) + log.Info("Discarding is done", "duration", time.Since(start)) }() return device.Discard(0, uint64(deviceSize)) diff --git a/images/sds-health-watcher-controller/cmd/main.go b/images/sds-health-watcher-controller/cmd/main.go index 56b637da9..44179cb0e 100644 --- a/images/sds-health-watcher-controller/cmd/main.go +++ b/images/sds-health-watcher-controller/cmd/main.go @@ -65,29 +65,31 @@ func main() { os.Exit(1) } - log.Info(fmt.Sprintf("[main] Go Version:%s ", goruntime.Version())) - log.Info(fmt.Sprintf("[main] OS/Arch:Go OS/Arch:%s/%s ", goruntime.GOOS, goruntime.GOARCH)) + mainLog := log.WithName("main") - log.Info("[main] CfgParams has been successfully created") - log.Info(fmt.Sprintf("[main] %s = %s", config.LogLevel, cfgParams.Loglevel)) - log.Info(fmt.Sprintf("[main] %s = %s", config.MetricsPort, cfgParams.MetricsPort)) - log.Info(fmt.Sprintf("[main] %s = %s", config.ScanInterval, cfgParams.ScanIntervalSec)) + mainLog.Info(fmt.Sprintf("Go Version:%s ", goruntime.Version())) + mainLog.Info(fmt.Sprintf("OS/Arch:Go OS/Arch:%s/%s ", goruntime.GOOS, goruntime.GOARCH)) + + mainLog.Info("CfgParams has been successfully created") + mainLog.Info(fmt.Sprintf("%s = %s", config.LogLevel, cfgParams.Loglevel)) + mainLog.Info(fmt.Sprintf("%s = %s", config.MetricsPort, cfgParams.MetricsPort)) + mainLog.Info(fmt.Sprintf("%s = %s", config.ScanInterval, cfgParams.ScanIntervalSec)) kConfig, err := kubutils.KubernetesDefaultConfigCreate() if err != nil { - log.Error(err, "[main] unable to KubernetesDefaultConfigCreate") + mainLog.Error(err, "unable to KubernetesDefaultConfigCreate") } - log.Info("[main] kubernetes config has been successfully created.") + mainLog.Info("kubernetes config has been successfully created.") scheme := runtime.NewScheme() for _, f := range resourcesSchemeFuncs { err := f(scheme) if err != nil { - log.Error(err, "[main] unable to add scheme to func") + mainLog.Error(err, "unable to add scheme to func") os.Exit(1) } } - log.Info("[main] successfully read scheme CR") + mainLog.Info("successfully read scheme CR") managerOpts := manager.Options{ Scheme: scheme, @@ -98,61 +100,61 @@ func main() { mgr, err := manager.New(kConfig, managerOpts) if err != nil { - log.Error(err, "[main] unable to manager.New") + mainLog.Error(err, "unable to manager.New") os.Exit(1) } - log.Info("[main] successfully created kubernetes manager") + mainLog.Info("successfully created kubernetes manager") metrics := monitoring.GetMetrics(cfgParams.NodeName) controller.RunSdsInfraWatcher(ctx, mgr, *cfgParams, metrics, *log) err = controller.RunLVGConditionsWatcher(mgr, *cfgParams, *log) if err != nil { - log.Error(err, "[main] unable to run LVGConditionsWatcher controller") + mainLog.Error(err, "unable to run LVGConditionsWatcher controller") os.Exit(1) } err = controller.RunLVGStatusWatcher(mgr, *log) if err != nil { - log.Error(err, "[main] unable to run LVGConfigurationWatcher controller") + mainLog.Error(err, "unable to run LVGConfigurationWatcher controller") os.Exit(1) } err = controller.RunMCWatcher(mgr, *log) if err != nil { - log.Error(err, "[main] unable to run MCWatcher controller") + mainLog.Error(err, "unable to run MCWatcher controller") os.Exit(1) } err = controller.RunBlockDeviceLabelsWatcher(mgr, *log, *cfgParams) if err != nil { - log.Error(err, "[main] unable to run BlockDeviceWatcher controller") + mainLog.Error(err, "unable to run 
BlockDeviceWatcher controller") os.Exit(1) } err = controller.RunLVMVolumeGroupSetWatcher(mgr, *log, *cfgParams, metrics) if err != nil { - log.Error(err, "[main] unable to run RunLVMVolumeGroupSetWatcher controller") + mainLog.Error(err, "unable to run RunLVMVolumeGroupSetWatcher controller") os.Exit(1) } if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - log.Error(err, "[main] unable to mgr.AddHealthzCheck") + mainLog.Error(err, "unable to mgr.AddHealthzCheck") os.Exit(1) } - log.Info("[main] successfully AddHealthzCheck") + mainLog.Info("successfully AddHealthzCheck") if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - log.Error(err, "[main] unable to mgr.AddReadyzCheck") + mainLog.Error(err, "unable to mgr.AddReadyzCheck") os.Exit(1) } - log.Info("[main] successfully AddReadyzCheck") + mainLog.Info("successfully AddReadyzCheck") err = mgr.Start(ctx) if err != nil { - log.Error(err, "[main] unable to mgr.Start") + mainLog.Error(err, "unable to mgr.Start") os.Exit(1) } - log.Info("[main] successfully starts the manager") + mainLog.Info("successfully starts the manager") } diff --git a/images/sds-health-watcher-controller/go.mod b/images/sds-health-watcher-controller/go.mod index 90e34c3a8..586a8dd62 100644 --- a/images/sds-health-watcher-controller/go.mod +++ b/images/sds-health-watcher-controller/go.mod @@ -6,7 +6,7 @@ require ( github.com/cloudflare/cfssl v1.6.5 github.com/deckhouse/sds-common-lib v0.5.0 github.com/deckhouse/sds-node-configurator/api v0.0.0-20250114161813-c1a8b09cd47d - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/prometheus/client_golang v1.22.0 github.com/stretchr/testify v1.10.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/images/sds-health-watcher-controller/go.sum b/images/sds-health-watcher-controller/go.sum index 03058b2c2..b4e18c86c 100644 --- a/images/sds-health-watcher-controller/go.sum +++ b/images/sds-health-watcher-controller/go.sum @@ -20,8 +20,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= diff --git a/images/sds-health-watcher-controller/pkg/controller/block_device_labels_watcher.go b/images/sds-health-watcher-controller/pkg/controller/block_device_labels_watcher.go index 9db329d2c..d95305765 100644 --- a/images/sds-health-watcher-controller/pkg/controller/block_device_labels_watcher.go +++ b/images/sds-health-watcher-controller/pkg/controller/block_device_labels_watcher.go @@ -18,7 +18,6 @@ package controller import ( "context" - "fmt" "reflect" "k8s.io/apimachinery/pkg/api/errors" @@ -52,64 +51,69 @@ func RunBlockDeviceLabelsWatcher( log logger.Logger, cfg config.Options, ) error { + log = 
log.WithName("RunBlockDeviceLabelsWatcher") cl := mgr.GetClient() c, err := controller.New(BlockDeviceLabelsWatcherCtrlName, mgr, controller.Options{ Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] starts to reconcile the BlockDevice %s", request.Name)) + log := log.WithName("Reconcile").WithValues("blockDeviceName", request.Name) + log.Info("starts to reconcile the BlockDevice") bd := &v1alpha1.BlockDevice{} err := cl.Get(ctx, request.NamespacedName, bd) if err != nil { if errors.IsNotFound(err) { - log.Warning(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] seems like the BlockDevice %s was removed as it was not found. Stop the reconcile", request.Name)) + log.Warning("seems like the BlockDevice was removed as it was not found. Stop the reconcile") return reconcile.Result{}, nil } - log.Error(err, fmt.Sprintf("[RunBlockDeviceLabelsWatcher] unable to get the BlockDevice %s", request.Name)) + log.Error(err, "unable to get the BlockDevice") return reconcile.Result{}, err } + log = log.WithValues("blockDeviceName", bd.Name) shouldRequeue, err := reconcileBlockDeviceLabels(ctx, cl, log, bd) if err != nil { - log.Error(err, fmt.Sprintf("[RunBlockDeviceLabelsWatcher] unable to reconcile the BlockDevice %s", bd.Name)) + log.Error(err, "unable to reconcile the BlockDevice") return reconcile.Result{}, err } if shouldRequeue { - log.Warning(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] the request for the BlockDevice %s should be requeued in %s", bd.Name, cfg.ScanIntervalSec.String())) + log.Warning("the request for the BlockDevice should be requeued", "requeueIn", cfg.ScanIntervalSec) return reconcile.Result{RequeueAfter: cfg.ScanIntervalSec}, nil } - log.Info(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] the BlockDevice %s was successfully reconciled", bd.Name)) + log.Info("the BlockDevice was successfully reconciled") return reconcile.Result{}, nil }), }) if err != nil { - log.Error(err, "[RunBlockDeviceLabelsWatcher] unable to create the controller") + log.Error(err, "unable to create the controller") return err } err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.BlockDevice{}, handler.TypedFuncs[*v1alpha1.BlockDevice, reconcile.Request]{ CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.BlockDevice], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Debug(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] got a Create event for the BlockDevice %s", e.Object.Name)) + log := log.WithName("CreateFunc").WithValues("blockDeviceName", e.Object.Name) + log.Debug("got a Create event for the BlockDevice") q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}}) - log.Debug(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] the BlockDevice %s was added to the Reconciler's queue", e.Object.Name)) + log.Debug("the BlockDevice was added to the Reconciler's queue") }, UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.BlockDevice], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Debug(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] got an Update event for the BlockDevice %s", e.ObjectNew.Name)) + log := log.WithName("UpdateFunc").WithValues("blockDeviceName", e.ObjectNew.Name) + log.Debug("got an Update event for the BlockDevice") if reflect.DeepEqual(e.ObjectOld.Labels, e.ObjectNew.Labels) { - log.Debug(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] no need to reconcile the BlockDevice 
%s as its labels are the same", e.ObjectNew.Name)) + log.Debug("no need to reconcile the BlockDevice as its labels are the same") return } q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}}) - log.Debug(fmt.Sprintf("[RunBlockDeviceLabelsWatcher] the BlockDevice %s was added to the Reconciler's queue", e.ObjectNew.Name)) + log.Debug("the BlockDevice was added to the Reconciler's queue") }, })) if err != nil { - log.Error(err, "[RunBlockDeviceLabelsWatcher] unable to controller.Watch") + log.Error(err, "unable to controller.Watch") return err } @@ -117,36 +121,38 @@ func RunBlockDeviceLabelsWatcher( } func reconcileBlockDeviceLabels(ctx context.Context, cl client.Client, log logger.Logger, blockDevice *v1alpha1.BlockDevice) (bool, error) { - log.Info(fmt.Sprintf("[reconcileBlockDeviceLabels] starts the reconciliation for the BlockDevice %s", blockDevice.Name)) + log = log.WithName("reconcileBlockDeviceLabels").WithValues("blockDeviceName", blockDevice.Name) + log.Info("starts the reconciliation for the BlockDevice") shouldRetry := false - log.Debug("[reconcileBlockDeviceLabels] tries to list LVMVolumeGroups") + log.Debug("tries to list LVMVolumeGroups") lvgList := &v1alpha1.LVMVolumeGroupList{} err := cl.List(ctx, lvgList) if err != nil { return false, err } - log.Debug("[reconcileBlockDeviceLabels] successfully listed LVMVolumeGroups") + log.Debug("successfully listed LVMVolumeGroups") for _, lvg := range lvgList.Items { + log := log.WithValues("lvgName", lvg.Name) if len(lvg.Status.Nodes) == 0 { - log.Info(fmt.Sprintf("[reconcileBlockDeviceLabels] LVMVolumeGroup %s nodes are not configured yet, retry later...", lvg.Name)) + log.Info("LVMVolumeGroup nodes are not configured yet, retry later...") shouldRetry = true continue } if checkIfLVGInProgress(&lvg) { - log.Warning(fmt.Sprintf("[reconcileBlockDeviceLabels] the LVMVolumeGroup %s is in a progress, retry later...", lvg.Name)) + log.Warning("the LVMVolumeGroup is in a progress, retry later...") shouldRetry = true continue } - log.Debug(fmt.Sprintf("[reconcileBlockDeviceLabels] tries to configure a selector from blockDeviceSelector of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("tries to configure a selector from blockDeviceSelector of the LVMVolumeGroup") selector, err := metav1.LabelSelectorAsSelector(lvg.Spec.BlockDeviceSelector) if err != nil { return false, err } - log.Debug(fmt.Sprintf("[reconcileBlockDeviceLabels] successfully configured a selector from blockDeviceSelector of the LVMVolumeGroup %s", lvg.Name)) + log.Debug("successfully configured a selector from blockDeviceSelector of the LVMVolumeGroup") usedBdNames := make(map[string]struct{}, len(lvg.Status.Nodes[0].Devices)) for _, n := range lvg.Status.Nodes { @@ -167,12 +173,13 @@ func reconcileBlockDeviceLabels(ctx context.Context, cl client.Client, log logge lvg.Labels = make(map[string]string) } lvg.Labels[LVGUpdateTriggerLabel] = "true" - log.Info(fmt.Sprintf("[reconcileBlockDeviceLabels] the LVMVolumeGroup %s should be triggered to update its configuration. Add the label %s to the resource", lvg.Name, LVGUpdateTriggerLabel)) + log.Info("the LVMVolumeGroup should be triggered to update its configuration. 
Add the label to the resource", "label", LVGUpdateTriggerLabel) err = cl.Update(ctx, &lvg) if err != nil { return false, err } - log.Info(fmt.Sprintf("[reconcileBlockDeviceLabels] successfully added the label %s to provide LVMVolumeGroup %s resource configuration update", LVGUpdateTriggerLabel, lvg.Name)) + log.Info("successfully added the label to provide LVMVolumeGroup resource configuration update", + "label", LVGUpdateTriggerLabel) } } @@ -192,31 +199,42 @@ func checkIfLVGInProgress(newLVG *v1alpha1.LVMVolumeGroup) bool { } func shouldTriggerLVGUpdateIfMatches(log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, blockDevice *v1alpha1.BlockDevice, usedBdNames map[string]struct{}) bool { - log.Debug(fmt.Sprintf("[reconcileBlockDeviceLabels] BlockDevice %s matches a blockDeviceSelector of the LVMVolumeGroup %s", blockDevice.Name, lvg.Name)) + log = log. + WithName("shouldTriggerLVGUpdateIfMatches"). + WithValues( + "blockDeviceName", blockDevice.Name, + "lvgName", lvg.Name) + log.Debug("BlockDevice matches a blockDeviceSelector of the LVMVolumeGroup") if _, used := usedBdNames[blockDevice.Name]; !used { - log.Info(fmt.Sprintf("[reconcileBlockDeviceLabels] the BlockDevice %s matches the LVMVolumeGroup %s blockDeviceSelector, but is not used yet", blockDevice.Name, lvg.Name)) + log.Info("the BlockDevice matches the LVMVolumeGroup blockDeviceSelector, but is not used yet") return true } // for the case when BlockDevice stopped match the LVG blockDeviceSelector and then start again for _, c := range lvg.Status.Conditions { if c.Type == internal.TypeVGConfigurationApplied && c.Status == metav1.ConditionFalse { - log.Warning(fmt.Sprintf("[reconcileBlockDeviceLabels] the BlockDevice %s matches the LVMVolumeGroup %s blockDeviceSelector, but the LVMVolumeGroup has condition %s in status False", blockDevice.Name, lvg.Name, c.Type)) + log.Warning("the BlockDevice matches the LVMVolumeGroup blockDeviceSelector, but the LVMVolumeGroup has condition in status False", + "conditionType", c.Type) return true } } - log.Debug(fmt.Sprintf("[reconcileBlockDeviceLabels] the BlockDevice %s matches the LVMVolumeGroup %s blockDeviceSelector and already used by the resource", blockDevice.Name, lvg.Name)) + log.Debug("the BlockDevice matches the LVMVolumeGroup blockDeviceSelector and already used by the resource") return false } func shouldTriggerLVGUpdateIfNotMatches(log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, blockDevice *v1alpha1.BlockDevice, usedBdNames map[string]struct{}) bool { - log.Debug(fmt.Sprintf("[reconcileBlockDeviceLabels] BlockDevice %s does not match a blockDeviceSelector of the LVMVolumeGroup %s", blockDevice.Name, lvg.Name)) + log = log. + WithName("shouldTriggerLVGUpdateIfNotMatches"). 
+ WithValues( + "blockDeviceName", blockDevice.Name, + "lvgName", lvg.Name) + log.Debug("BlockDevice does not match a blockDeviceSelector of the LVMVolumeGroup") if _, used := usedBdNames[blockDevice.Name]; used { - log.Warning(fmt.Sprintf("[reconcileBlockDeviceLabels] the BlockDevice %s does not match the LVMVolumeGroup %s blockDeviceSelector, but is used by the resource", blockDevice.Name, lvg.Name)) + log.Warning("the BlockDevice does not match the LVMVolumeGroup blockDeviceSelector, but is used by the resource") return true } - log.Debug(fmt.Sprintf("[reconcileBlockDeviceLabels] the BlockDevice %s does not match the LVMVolumeGroup %s blockDeviceSelector and is not used by the resource", blockDevice.Name, lvg.Name)) + log.Debug("the BlockDevice does not match the LVMVolumeGroup blockDeviceSelector and is not used by the resource") return false } diff --git a/images/sds-health-watcher-controller/pkg/controller/lvg_conditions_watcher.go b/images/sds-health-watcher-controller/pkg/controller/lvg_conditions_watcher.go index fdb8cb1a6..4efdf05e3 100644 --- a/images/sds-health-watcher-controller/pkg/controller/lvg_conditions_watcher.go +++ b/images/sds-health-watcher-controller/pkg/controller/lvg_conditions_watcher.go @@ -55,59 +55,64 @@ func RunLVGConditionsWatcher( cfg config.Options, log logger.Logger, ) error { + log = log.WithName("RunLVGConditionsWatcher") cl := mgr.GetClient() c, err := controller.New(SdsLVGConditionsWatcherCtrlName, mgr, controller.Options{ Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] Reconciler got a request %s", request.String())) + log := log.WithName("Reconcile").WithValues("lvgName", request.Name) + log.Info("Reconciler got a request") lvg := &v1alpha1.LVMVolumeGroup{} err := cl.Get(ctx, request.NamespacedName, lvg) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVGConditionsWatcher] unable to get the LVMVolumeGroup %s", request.Name)) + log.Error(err, "unable to get the LVMVolumeGroup") return reconcile.Result{}, err } if lvg.Name == "" { - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] seems like the LVMVolumeGroup for the request %s was deleted. Reconcile will stop.", request.Name)) + log.Info("seems like the LVMVolumeGroup for the request was deleted. 
Reconcile will stop") return reconcile.Result{}, nil } + log = log.WithValues("lvgName", lvg.Name) shouldRequeue, err := reconcileLVGConditions(ctx, cl, log, lvg) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVGConditionsWatcher] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to reconcile the LVMVolumeGroup") } if shouldRequeue { - log.Warning(fmt.Sprintf("[RunLVGConditionsWatcher] the LVMVolumeGroup %s request will be requeued in %s", lvg.Name, cfg.ScanIntervalSec.String())) + log.Warning("the LVMVolumeGroup request will be requeued", "requeueIn", cfg.ScanIntervalSec) return reconcile.Result{ RequeueAfter: cfg.ScanIntervalSec, }, nil } - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] Reconciler successfully reconciled the LVMVolumeGroup %s", lvg.Name)) + log.Info("Reconciler successfully reconciled the LVMVolumeGroup") return reconcile.Result{}, nil }), }) if err != nil { - log.Error(err, "[RunLVGConditionsWatcher] unable to create a controller") + log.Error(err, "unable to create a controller") return err } err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] got a create event for the LVMVolumeGroup %s", e.Object.GetName())) + log := log.WithName("CreateFunc").WithValues("lvgName", e.Object.GetName()) + log.Info("got a create event for the LVMVolumeGroup") request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] createFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) + log.Info("createFunc added a request for the LVMVolumeGroup to the Reconcilers queue") }, UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] got a update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) + log := log.WithName("UpdateFunc").WithValues("lvgName", e.ObjectNew.GetName()) + log.Info("got an update event for the LVMVolumeGroup") if reflect.DeepEqual(e.ObjectOld.Status.Conditions, e.ObjectNew.Status.Conditions) { - log.Info(fmt.Sprintf("[RunLVGConditionsWatcher] no condition changes for the LVMVolumeGroup %s. No need to reconcile", e.ObjectNew.Name)) + log.Info("no condition changes for the LVMVolumeGroup. 
No need to reconcile") return } @@ -116,7 +121,7 @@ func RunLVGConditionsWatcher( }, })) if err != nil { - log.Error(err, "[RunLVGConditionsWatcher] unable to watch the events") + log.Error(err, "unable to watch the events") return err } @@ -124,73 +129,76 @@ func RunLVGConditionsWatcher( } func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLVGConditions] starts the reconciliation for the LVMVolumeGroup %s", lvg.Name)) + log = log.WithName("reconcileLVGConditions") + log.Debug("starts the reconciliation for the LVMVolumeGroup") if lvg.Status.Conditions == nil { - log.Info(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s has no conditions, retry later", lvg.Name)) + log.Info("the LVMVolumeGroup has no conditions, retry later") return true, nil } crd, err := getCRD(ctx, cl, lvgCrdName) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to get crd %s", lvgCrdName)) + log.Error(err, "unable to get crd", "crdName", lvgCrdName) return true, err } targetConCount, err := getTargetConditionsCount(crd) if err != nil { - log.Error(err, "[reconcileLVGConditions] unable to get target conditions count") + log.Error(err, "unable to get target conditions count") return true, err } if len(lvg.Status.Conditions) < targetConCount { - log.Info(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s misses some conditions, wait for them to got configured", lvg.Name)) - log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s conditions current count: %d, target count: %d", lvg.Name, len(lvg.Status.Conditions), targetConCount)) + log.Info("the LVMVolumeGroup misses some conditions, wait for them to got configured", + "currentCount", len(lvg.Status.Conditions), + "targetCount", targetConCount) err = updateLVMVolumeGroupPhaseIfNeeded(ctx, cl, lvg, v1alpha1.PhasePending) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to update the LVMVolumeGroup %s phase", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup phase") return true, err } err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionFalse, internal.TypeReady, internal.ReasonPending, "wait for conditions to got configured") if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeReady, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", "conditionType", internal.TypeReady) return true, err } return false, nil } - log.Info(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s conditions are fully configured. Check their states", lvg.Name)) + log.Info("the LVMVolumeGroup conditions are fully configured. 
Check their states") ready := true falseConditions := make([]string, 0, len(lvg.Status.Conditions)) for _, c := range lvg.Status.Conditions { - log.Debug(fmt.Sprintf("[reconcileLVGConditions] check condition %s of the LVMVolumeGroup %s", c.Type, lvg.Name)) - log.Trace(fmt.Sprintf("[reconcileLVGConditions] check condition %+v of the LVMVolumeGroup %s", c, lvg.Name)) + log := log.WithValues("conditionType", c.Type) + log.Debug("check condition") + log.Trace("check condition details", "condition", c) if c.Type == internal.TypeReady { - log.Debug(fmt.Sprintf("[reconcileLVGConditions] the condition %s of the LVMVolumeGroup %s is ours, skip it", c.Type, lvg.Name)) + log.Debug("the condition is ours, skip it") continue } if c.Status == metav1.ConditionTrue { - log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s condition %s has status True", lvg.Name, c.Type)) + log.Debug("condition has status True") continue } if c.Reason == internal.ReasonCreating { ready = false falseConditions = nil - log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s condition %s has Creating reason. Turn the LVMVolumeGroup Ready condition and phase to Pending", lvg.Name, c.Type)) + log.Debug("condition has Creating reason. Turn the LVMVolumeGroup Ready condition and phase to Pending") err = updateLVMVolumeGroupPhaseIfNeeded(ctx, cl, lvg, v1alpha1.PhasePending) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to update the LVMVolumeGroup %s phase", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup phase") return true, err } err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionFalse, internal.TypeReady, internal.ReasonPending, fmt.Sprintf("condition %s has Creating reason", c.Type)) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeReady, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", "conditionType", internal.TypeReady) return true, err } @@ -200,16 +208,16 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo if c.Reason == internal.ReasonTerminating { ready = false falseConditions = nil - log.Debug(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s condition %s has Terminating reason. Turn the LVMVolumeGroup Ready condition and phase to Terminating", lvg.Name, c.Type)) + log.Debug("condition has Terminating reason. 
Turn the LVMVolumeGroup Ready condition and phase to Terminating") err := updateLVMVolumeGroupPhaseIfNeeded(ctx, cl, lvg, v1alpha1.PhaseTerminating) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to update the LVMVolumeGroup %s phase", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup phase") return true, err } err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionFalse, internal.TypeReady, internal.ReasonTerminating, fmt.Sprintf("condition %s has Terminating reason", c.Type)) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeReady, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", "conditionType", internal.TypeReady) return true, err } break @@ -217,7 +225,7 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo if c.Status == metav1.ConditionFalse && !slices.Contains(acceptableReasons, c.Reason) { - log.Warning(fmt.Sprintf("[reconcileLVGConditions] the condition %s of the LVMVolumeGroup %s has status False and its reason is not acceptable", c.Type, lvg.Name)) + log.Warning("condition has status False and its reason is not acceptable", "reason", c.Reason) falseConditions = append(falseConditions, c.Type) ready = false } @@ -226,35 +234,35 @@ func reconcileLVGConditions(ctx context.Context, cl client.Client, log logger.Lo if len(falseConditions) > 0 { err := updateLVMVolumeGroupPhaseIfNeeded(ctx, cl, lvg, v1alpha1.PhaseNotReady) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to update the LVMVolumeGroup %s phase", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup phase") return true, err } err = updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionFalse, internal.TypeReady, "InvalidConditionStates", fmt.Sprintf("conditions %s has False status", strings.Join(falseConditions, ","))) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to add the condition %s to the LVMVolumeGroup %s", internal.TypeReady, lvg.Name)) + log.Error(err, "unable to add the condition to the LVMVolumeGroup", "conditionType", internal.TypeReady) return true, err } - log.Info(fmt.Sprintf("[reconcileLVGConditions] successfully reconciled the LVMVolumeGroup %s condition %s to NotReady", lvg.Name, internal.TypeReady)) + log.Info("successfully reconciled the LVMVolumeGroup condition to NotReady", "conditionType", internal.TypeReady) } if ready { - log.Info(fmt.Sprintf("[reconcileLVGConditions] the LVMVolumeGroup %s has no conditions with status False", lvg.Name)) + log.Info("the LVMVolumeGroup has no conditions with status False") - log.Debug(fmt.Sprintf("[reconcileLVGConditions] tries to add a condition %s to the LVMVolumeGroup %s", internal.TypeReady, lvg.Name)) + log.Debug("tries to add a condition to the LVMVolumeGroup", "conditionType", internal.TypeReady) err := updateLVGConditionIfNeeded(ctx, cl, log, lvg, metav1.ConditionTrue, internal.TypeReady, "ValidConditionStates", "every condition has a proper state") if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVGConditions] unable to update the condition %s of the LVMVolumeGroup %s", internal.TypeReady, lvg.Name)) + log.Error(err, "unable to update the condition of the LVMVolumeGroup", "conditionType", internal.TypeReady) return true, err } err = updateLVMVolumeGroupPhaseIfNeeded(ctx, cl, lvg, v1alpha1.PhaseReady) if err != nil { - log.Error(err, 
fmt.Sprintf("[reconcileLVGConditions] unable to update the LVMVolumeGroup %s phase", lvg.Name)) + log.Error(err, "unable to update the LVMVolumeGroup phase") } - log.Info(fmt.Sprintf("[reconcileLVGConditions] successfully reconciled the LVMVolumeGroup %s phase to Ready", lvg.Name)) - log.Info(fmt.Sprintf("[reconcileLVGConditions] successfully reconciled conditions of the LVMVolumeGroup %s", lvg.Name)) + log.Info("successfully reconciled the LVMVolumeGroup phase to Ready") + log.Info("successfully reconciled conditions of the LVMVolumeGroup") } return false, nil diff --git a/images/sds-health-watcher-controller/pkg/controller/lvg_status_watcher.go b/images/sds-health-watcher-controller/pkg/controller/lvg_status_watcher.go index 686eedfc2..64ad37099 100644 --- a/images/sds-health-watcher-controller/pkg/controller/lvg_status_watcher.go +++ b/images/sds-health-watcher-controller/pkg/controller/lvg_status_watcher.go @@ -45,60 +45,66 @@ func RunLVGStatusWatcher( mgr manager.Manager, log logger.Logger, ) error { + log = log.WithName("RunLVGStatusWatcher") cl := mgr.GetClient() c, err := controller.New(LVGStatusWatcherCtrl, mgr, controller.Options{ Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] Reconciler got a request %s", request.String())) + log := log.WithName("Reconcile").WithValues("lvgName", request.Name) + log.Info("Reconciler got a request") lvg := &v1alpha1.LVMVolumeGroup{} err := cl.Get(ctx, request.NamespacedName, lvg) if err != nil { if errors2.IsNotFound(err) { - log.Warning(fmt.Sprintf("[RunLVGStatusWatcher] seems like the LVMVolumeGroup was deleted as it is unable to get it, err: %s. Stop to reconcile the resource", err.Error())) + log.Warning("seems like the LVMVolumeGroup was deleted as it is unable to get it. Stop to reconcile the resource", + "error", err) return reconcile.Result{}, nil } - log.Error(err, fmt.Sprintf("[RunLVGStatusWatcher] unable to get the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to get the LVMVolumeGroup") return reconcile.Result{}, err } if lvg.Name == "" { - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] seems like the LVMVolumeGroup for the request %s was deleted. Reconcile will stop.", request.Name)) + log.Info("seems like the LVMVolumeGroup for the request was deleted. 
Reconcile will stop") return reconcile.Result{}, nil } + log = log.WithValues("lvgName", lvg.Name) err = reconcileLVGStatus(ctx, cl, log, lvg) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVGStatusWatcher] unable to reconcile the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to reconcile the LVMVolumeGroup") return reconcile.Result{}, err } - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] Reconciler successfully reconciled the LVMVolumeGroup %s", lvg.Name)) + log.Info("Reconciler successfully reconciled the LVMVolumeGroup") return reconcile.Result{}, nil }), }) if err != nil { - log.Error(err, "[RunLVGStatusWatcher] unable to create a controller") + log.Error(err, "unable to create a controller") return err } err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMVolumeGroup{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroup, reconcile.Request]{ CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] got a create event for the LVMVolumeGroup %s", e.Object.GetName())) + log := log.WithName("CreateFunc").WithValues("lvgName", e.Object.GetName()) + log.Info("got a create event for the LVMVolumeGroup") request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] CreateFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.Object.GetName())) + log.Info("CreateFunc added a request for the LVMVolumeGroup to the Reconcilers queue") }, UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroup], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] got an update event for the LVMVolumeGroup %s", e.ObjectNew.GetName())) + log := log.WithName("UpdateFunc").WithValues("lvgName", e.ObjectNew.GetName()) + log.Info("got an update event for the LVMVolumeGroup") request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunLVGStatusWatcher] UpdateFunc added a request for the LVMVolumeGroup %s to the Reconcilers queue", e.ObjectNew.GetName())) + log.Info("UpdateFunc added a request for the LVMVolumeGroup to the Reconcilers queue") }, })) if err != nil { - log.Error(err, "[RunLVGStatusWatcher] unable to watch the events") + log.Error(err, "unable to watch the events") return err } @@ -106,14 +112,17 @@ func RunLVGStatusWatcher( } func reconcileLVGStatus(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup) error { - log.Debug(fmt.Sprintf("[reconcileLVGStatus] starts to reconcile the LVMVolumeGroup %s", lvg.Name)) + log = log.WithName("reconcileLVGStatus") + log.Debug("starts to reconcile the LVMVolumeGroup") shouldUpdate := false - log.Debug(fmt.Sprintf("[reconcileLVGStatus] starts to check ThinPools Ready status for the LVMVolumeGroup %s", lvg.Name)) + log.Debug("starts to check ThinPools Ready status for the LVMVolumeGroup") totalTPCount := getUniqueThinPoolCount(lvg.Spec.ThinPools, lvg.Status.ThinPools) actualTPCount := getActualThinPoolReadyCount(lvg.Status.ThinPools) if totalTPCount > actualTPCount { - log.Warning(fmt.Sprintf("[reconcileLVGStatus] some ThinPools of the LVMVolumeGroup %s is not Ready", lvg.Name)) + log.Warning("some ThinPools of the LVMVolumeGroup is not Ready", + 
"totalTPCount", totalTPCount, + "actualTPCount", actualTPCount) } tpReady := fmt.Sprintf("%d/%d", actualTPCount, totalTPCount) if lvg.Status.ThinPoolReady != tpReady { diff --git a/images/sds-health-watcher-controller/pkg/controller/lvm_volume_group_set_watcher.go b/images/sds-health-watcher-controller/pkg/controller/lvm_volume_group_set_watcher.go index c503778c7..f5a9606f3 100644 --- a/images/sds-health-watcher-controller/pkg/controller/lvm_volume_group_set_watcher.go +++ b/images/sds-health-watcher-controller/pkg/controller/lvm_volume_group_set_watcher.go @@ -58,64 +58,69 @@ func RunLVMVolumeGroupSetWatcher( cfg config.Options, metrics monitoring.Metrics, ) error { + log = log.WithName("RunLVMVolumeGroupSetWatcher") cl := mgr.GetClient() c, err := controller.New(LVMVolumeGroupSetCtrlName, mgr, controller.Options{ Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] tries to reconcile the request of the LVMVolumeGroupSet %s", request.Name)) + log := log.WithName("Reconcile").WithValues("lvgSetName", request.Name) + log.Info("tries to reconcile the request of the LVMVolumeGroupSet") lvgSet := &v1alpha1.LVMVolumeGroupSet{} err := cl.Get(ctx, request.NamespacedName, lvgSet) if err != nil { if errors.IsNotFound(err) { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] seems like the LVMVolumeGroupSet %s has been deleted. Stop the reconcile", lvgSet.Name)) + log.Warning("seems like the LVMVolumeGroupSet has been deleted. Stop the reconcile") return reconcile.Result{}, nil } - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] unable to get the LVMVolumeGroupSet %s", request.Name)) + log.Error(err, "unable to get the LVMVolumeGroupSet") return reconcile.Result{}, err } + log = log.WithValues("lvgSetName", lvgSet.Name) shouldRequeue, err := reconcileLVMVolumeGroupSet(ctx, cl, log, metrics, lvgSet) if err != nil { - log.Error(err, fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] unable to reconcile the LVMVolumeGroupSet %s", lvgSet.Name)) + log.Error(err, "unable to reconcile the LVMVolumeGroupSet") return reconcile.Result{}, err } if shouldRequeue { - log.Warning(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] the LVMVolumeGroupSet %s request should be requeued in %s", lvgSet.Name, cfg.ScanIntervalSec.String())) + log.Warning("the LVMVolumeGroupSet request should be requeued", "requeueIn", cfg.ScanIntervalSec) return reconcile.Result{RequeueAfter: cfg.ScanIntervalSec}, nil } - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] successfully reconciled the request of the LVMVolumeGroupSet %s", request.Name)) + log.Info("successfully reconciled the request of the LVMVolumeGroupSet") return reconcile.Result{}, nil }), }) if err != nil { - log.Error(err, "[RunLVMVolumeGroupSetWatcher] unable to create the controller") + log.Error(err, "unable to create the controller") return err } err = c.Watch(source.Kind(mgr.GetCache(), &v1alpha1.LVMVolumeGroupSet{}, handler.TypedFuncs[*v1alpha1.LVMVolumeGroupSet, reconcile.Request]{ CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*v1alpha1.LVMVolumeGroupSet], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] createFunc got a create event for the LVMVolumeGroupSet, name: %s", e.Object.GetName())) + log := log.WithName("CreateFunc").WithValues("lvgSetName", e.Object.GetName()) + log.Info("createFunc got a create event for the LVMVolumeGroupSet") request := 
reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] createFunc added a request for the LVMVolumeGroupSet %s to the Reconcilers queue", e.Object.GetName())) + log.Info("createFunc added a request for the LVMVolumeGroupSet to the Reconcilers queue") }, UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*v1alpha1.LVMVolumeGroupSet], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] UpdateFunc got a update event for the LVMVolumeGroupSet %s", e.ObjectNew.GetName())) + log := log.WithName("UpdateFunc").WithValues("lvgSetName", e.ObjectNew.GetName()) + log.Info("UpdateFunc got an update event for the LVMVolumeGroupSet") if !shouldLVGSetWatcherReconcileUpdateEvent(e.ObjectOld, e.ObjectNew) { - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] update event for the LVMVolumeGroupSet %s should not be reconciled as not target changed were made", e.ObjectNew.Name)) + log.Info("update event for the LVMVolumeGroupSet should not be reconciled as not target changed were made") return } request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunLVMVolumeGroupSetWatcher] updateFunc added a request for the LVMVolumeGroupSet %s to the Reconcilers queue", e.ObjectNew.Name)) + log.Info("updateFunc added a request for the LVMVolumeGroupSet to the Reconcilers queue") }, })) if err != nil { - log.Error(err, "[RunLVMVolumeGroupSetWatcher] the controller is unable to watch the LVMVolumeGroupSet resources") + log.Error(err, "the controller is unable to watch the LVMVolumeGroupSet resources") return err } @@ -127,24 +132,25 @@ func shouldLVGSetWatcherReconcileUpdateEvent(oldLVG, newLVG *v1alpha1.LVMVolumeG } func reconcileLVMVolumeGroupSet(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvgSet *v1alpha1.LVMVolumeGroupSet) (bool, error) { - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] starts the reconciliation of the LVMVolumeGroupSet %s", lvgSet.Name)) + log = log.WithName("reconcileLVMVolumeGroupSet") + log.Debug("starts the reconciliation of the LVMVolumeGroupSet") err := updateLVMVolumeGroupSetPhaseIfNeeded(ctx, cl, log, lvgSet, v1alpha1.PhasePending, reasonWorkInProgress) if err != nil { return false, err } - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] tries to get nodes by the LVMVolumeGroupSet %s nodeSelector", lvgSet.Name)) + log.Debug("tries to get nodes by the LVMVolumeGroupSet nodeSelector") nodes, err := GetNodes(ctx, cl, metrics, lvgSet.Spec.NodeSelector) if err != nil { return false, err } - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] successfully got nodes by the LVMVolumeGroupSet %s nodeSelector", lvgSet.Name)) - log.Trace(fmt.Sprintf("[reconcileLVMVolumeGroupSet] nodes: %+v", nodes)) + log.Debug("successfully got nodes by the LVMVolumeGroupSet nodeSelector") + log.Trace("nodes", "nodes", nodes) - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] starts to validate the LVMVolumeGroupSet %s nodes", lvgSet.Name)) + log.Debug("starts to validate the LVMVolumeGroupSet nodes") valid, reason := validateLVMVolumeGroupSetNodes(nodes) if !valid { - log.Warning(fmt.Sprintf("[reconcileLVMVolumeGroupSet] the LVMVolumeGroupSet %s nodes are invalid: %s", lvgSet.Name, reason)) + log.Warning("the LVMVolumeGroupSet nodes are 
invalid", "reason", reason) err = updateLVMVolumeGroupSetPhaseIfNeeded(ctx, cl, log, lvgSet, phaseNotCreated, reason) if err != nil { return false, err @@ -152,19 +158,19 @@ func reconcileLVMVolumeGroupSet(ctx context.Context, cl client.Client, log logge return true, nil } - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] the LVMVolumeGroupSet %s nodes are valid", lvgSet.Name)) + log.Debug("the LVMVolumeGroupSet nodes are valid") - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] tries to provide LVMVolumeGroups by the LVMVolumeGroupSet %s", lvgSet.Name)) + log.Debug("tries to provide LVMVolumeGroups by the LVMVolumeGroupSet") err = provideLVMVolumeGroupsBySet(ctx, cl, log, metrics, lvgSet, nodes) if err != nil { - log.Error(err, fmt.Sprintf("[reconcileLVMVolumeGroupSet] unable to provide LVMVolumeGroups by LVMVolumeGroupSet %s", lvgSet.Name)) + log.Error(err, "unable to provide LVMVolumeGroups by LVMVolumeGroupSet") updErr := updateLVMVolumeGroupSetPhaseIfNeeded(ctx, cl, log, lvgSet, phaseNotCreated, err.Error()) if updErr != nil { return false, updErr } return false, err } - log.Debug(fmt.Sprintf("[reconcileLVMVolumeGroupSet] successfully provided LVMVolumeGroups by the LVMVolumeGroupSet %s", lvgSet.Name)) + log.Debug("successfully provided LVMVolumeGroups by the LVMVolumeGroupSet") err = updateLVMVolumeGroupSetPhaseIfNeeded(ctx, cl, log, lvgSet, phaseCreated, "") if err != nil { @@ -175,16 +181,23 @@ func reconcileLVMVolumeGroupSet(ctx context.Context, cl client.Client, log logge } func provideLVMVolumeGroupsBySet(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvgSet *v1alpha1.LVMVolumeGroupSet, nodes map[string]v1.Node) error { + log = log. + WithName("provideLVMVolumeGroupsBySet"). + WithValues( + "lvgSetName", lvgSet.Name, + "strategy", lvgSet.Spec.Strategy) //nolint:gocritic switch lvgSet.Spec.Strategy { case strategyPerNode: - log.Debug(fmt.Sprintf("[provideLVMVolumeGroupsBySet] the LVMVolumeGroupSet %s has strategy %s, tries to provide the LVMVolumeGroups", lvgSet.Name, strategyPerNode)) + log.Debug("the LVMVolumeGroupSet has strategy, tries to provide the LVMVolumeGroups", + "strategy", strategyPerNode) err := provideLVMVolumeGroupsPerNode(ctx, cl, log, metrics, lvgSet, nodes) if err != nil { - log.Error(err, fmt.Sprintf("[provideLVMVolumeGroupsBySet] unable to provide LVMVolumeGroups by the LVMVolumeGroupSet %s with strategy %s", lvgSet.Name, strategyPerNode)) + log.Error(err, "unable to provide LVMVolumeGroups by the LVMVolumeGroupSet with strategy") return err } - log.Debug(fmt.Sprintf("[provideLVMVolumeGroupsBySet] successfully provided LVMVolumeGroups by the LVMVolumeGroupSet %s with strategy %s", lvgSet.Name, strategyPerNode)) + log.Debug("successfully provided LVMVolumeGroups by the LVMVolumeGroupSet with strategy", + "strategy", strategyPerNode) default: return fmt.Errorf("LVMVolumeGroupSet %s strategy %s is not implemented", lvgSet.Name, lvgSet.Spec.Strategy) } @@ -193,44 +206,46 @@ func provideLVMVolumeGroupsBySet(ctx context.Context, cl client.Client, log logg } func provideLVMVolumeGroupsPerNode(ctx context.Context, cl client.Client, log logger.Logger, metrics monitoring.Metrics, lvgSet *v1alpha1.LVMVolumeGroupSet, nodes map[string]v1.Node) error { - log.Debug("[provideLVMVolumeGroupsPerNode] tries to get LVMVolumeGroups") + log = log.WithName("provideLVMVolumeGroupsPerNode") + log.Debug("tries to get LVMVolumeGroups") currentLVGs, err := GetLVMVolumeGroups(ctx, cl, metrics) if err != nil { - log.Error(err, 
"[provideLVMVolumeGroupsPerNode] unable to get LVMVolumeGroups") + log.Error(err, "unable to get LVMVolumeGroups") return err } - log.Debug("[provideLVMVolumeGroupsPerNode] successfully got LVMVolumeGroups") - log.Trace(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] current LVMVolumeGroups: %+v", currentLVGs)) + log.Debug("successfully got LVMVolumeGroups") + log.Trace("current LVMVolumeGroups", "currentLVGs", currentLVGs) for _, node := range nodes { configuredLVG := configureLVGBySet(lvgSet, node) - log.Trace(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] configurated LVMVolumeGroup: %+v", configuredLVG)) + log := log.WithValues("lvgName", configuredLVG.Name, "nodeName", node.Name) + log.Trace("configurated LVMVolumeGroup", "configuredLVG", configuredLVG) currentLVG := matchConfiguredLVGWithExistingOne(configuredLVG, currentLVGs) if currentLVG != nil { - log.Debug(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] tries to update the LVMVolumeGroup %s", currentLVG.Name)) + log.Debug("tries to update the LVMVolumeGroup") err = updateLVMVolumeGroupByConfiguredFromSet(ctx, cl, currentLVG, configuredLVG) if err != nil { - log.Error(err, fmt.Sprintf("[provideLVMVolumeGroupsPerNode] unable to update the LVMVolumeGroup %s", currentLVG.Name)) + log.Error(err, "unable to update the LVMVolumeGroup") return err } - log.Info(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] LVMVolumeGroup %s has been successfully updated", currentLVG.Name)) + log.Info("LVMVolumeGroup has been successfully updated") } else { - log.Debug(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] tries to create the LVMVolumeGroup %s", configuredLVG.Name)) + log.Debug("tries to create the LVMVolumeGroup") err = createLVMVolumeGroup(ctx, cl, configuredLVG) if err != nil { - log.Error(err, fmt.Sprintf("[provideLVMVolumeGroupsPerNode] unable to create the LVMVolumeGroup %s", configuredLVG.Name)) + log.Error(err, "unable to create the LVMVolumeGroup") return err } - log.Info(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] the LVMVolumeGroup %s has been created", configuredLVG.Name)) - log.Debug(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] tries to update the LVMVolumeGroupSet %s status by the created LVMVolumeGroup %s", lvgSet.Name, configuredLVG.Name)) + log.Info("the LVMVolumeGroup has been created") + log.Debug("tries to update the LVMVolumeGroupSet status by the created LVMVolumeGroup") err = updateLVMVolumeGroupSetStatusByLVGIfNeeded(ctx, cl, log, lvgSet, configuredLVG, nodes) if err != nil { - log.Error(err, fmt.Sprintf("[provideLVMVolumeGroupsPerNode] unable to update the LVMVolumeGroupSet %s", lvgSet.Name)) + log.Error(err, "unable to update the LVMVolumeGroupSet") return err } - log.Debug(fmt.Sprintf("[provideLVMVolumeGroupsPerNode] successfully updated the LVMVolumeGroupSet %s status by the created LVMVolumeGroup %s", lvgSet.Name, configuredLVG.Name)) + log.Debug("successfully updated the LVMVolumeGroupSet status by the created LVMVolumeGroup") } } @@ -255,14 +270,15 @@ func updateLVMVolumeGroupByConfiguredFromSet(ctx context.Context, cl client.Clie } func updateLVMVolumeGroupSetStatusByLVGIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvgSet *v1alpha1.LVMVolumeGroupSet, lvg *v1alpha1.LVMVolumeGroup, nodes map[string]v1.Node) error { + log = log.WithName("updateLVMVolumeGroupSetStatusByLVGIfNeeded").WithValues("lvgSetName", lvgSet.Name, "lvgName", lvg.Name) for _, createdLVG := range lvgSet.Status.CreatedLVGs { if createdLVG.LVMVolumeGroupName == lvg.Name { - 
log.Debug(fmt.Sprintf("[updateLVMVolumeGroupSetStatusByLVGIfNeeded] no need to update the LVMVolumeGroupSet status %s by the LVMVolumeGroup %s", lvgSet.Name, lvg.Name)) + log.Debug("no need to update the LVMVolumeGroupSet status by the LVMVolumeGroup") return nil } } - log.Debug(fmt.Sprintf("[updateLVMVolumeGroupSetStatusByLVGIfNeeded] the LVMVolumeGroupSet status %s should be updated by the LVMVolumeGroup %s", lvgSet.Name, lvg.Name)) + log.Debug("the LVMVolumeGroupSet status should be updated by the LVMVolumeGroup") if cap(lvgSet.Status.CreatedLVGs) == 0 { lvgSet.Status.CreatedLVGs = make([]v1alpha1.LVMVolumeGroupSetStatusLVG, 0, len(nodes)) } @@ -331,22 +347,30 @@ func GetNodes(ctx context.Context, cl client.Client, metrics monitoring.Metrics, } func updateLVMVolumeGroupSetPhaseIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvgSet *v1alpha1.LVMVolumeGroupSet, phase, reason string) error { - log.Debug(fmt.Sprintf("[updateLVMVolumeGroupSetPhaseIfNeeded] tries to update the LVMVolumeGroupSet %s status phase to %s and reason to %s", lvgSet.Name, phase, reason)) + log = log. + WithName("updateLVMVolumeGroupSetPhaseIfNeeded"). + WithValues( + "lvgSetName", lvgSet.Name, + "phase", phase, + "reason", reason) + log.Debug("tries to update the LVMVolumeGroupSet status phase and reason") if lvgSet.Status.Phase == phase && lvgSet.Status.Reason == reason { - log.Debug(fmt.Sprintf("[updateLVMVolumeGroupSetPhaseIfNeeded] no need to update phase or reason of the LVMVolumeGroupSet %s as they are same", lvgSet.Name)) + log.Debug("no need to update phase or reason of the LVMVolumeGroupSet as they are same") return nil } - log.Debug(fmt.Sprintf("[updateLVMVolumeGroupSetPhaseIfNeeded] the LVMVolumeGroupSet %s status phase %s and reason %s should be updated to the phase %s and reason %s", lvgSet.Name, lvgSet.Status.Phase, lvgSet.Status.Reason, phase, reason)) + log.Debug("the LVMVolumeGroupSet status phase and reason should be updated", + "currentPhase", lvgSet.Status.Phase, + "currentReason", lvgSet.Status.Reason) lvgSet.Status.Phase = phase lvgSet.Status.Reason = reason err := cl.Status().Update(ctx, lvgSet) if err != nil { - log.Error(err, fmt.Sprintf("[updateLVMVolumeGroupSetPhaseIfNeeded] unable to update the LVMVolumeGroupSet %s", lvgSet.Name)) + log.Error(err, "unable to update the LVMVolumeGroupSet") return err } - log.Debug(fmt.Sprintf("[updateLVMVolumeGroupSetPhaseIfNeeded] successfully updated the LVMVolumeGroupSet %s to phase %s and reason %s", lvgSet.Name, phase, reason)) + log.Debug("successfully updated the LVMVolumeGroupSet to phase and reason") return nil } diff --git a/images/sds-health-watcher-controller/pkg/controller/mc_watcher.go b/images/sds-health-watcher-controller/pkg/controller/mc_watcher.go index cc104f3c4..754b59da4 100644 --- a/images/sds-health-watcher-controller/pkg/controller/mc_watcher.go +++ b/images/sds-health-watcher-controller/pkg/controller/mc_watcher.go @@ -19,7 +19,6 @@ package controller import ( "context" "encoding/json" - "fmt" "github.com/cloudflare/cfssl/log" "k8s.io/apimachinery/pkg/types" @@ -47,37 +46,47 @@ func RunMCWatcher( mgr manager.Manager, log logger.Logger, ) error { + log = log.WithName("RunMCWatcher") cl := mgr.GetClient() c, err := controller.New(MCWatcherCtrlName, mgr, controller.Options{ Reconciler: reconcile.Func(func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log.Info(fmt.Sprintf("[RunMCWatcher] Reconciler got a request %s", request.String())) + log := log. + WithName("Reconcile"). 
+ WithValues("moduleConfigName", request.Name) + log.Info("Reconciler got a request") checkMCThinPoolsEnabled(ctx, cl) return reconcile.Result{}, nil }), }) if err != nil { - log.Error(err, "[RunMCWatcher] unable to create a controller") + log.Error(err, "unable to create a controller") return err } err = c.Watch(source.Kind(mgr.GetCache(), &d8commonapi.ModuleConfig{}, handler.TypedFuncs[*d8commonapi.ModuleConfig, reconcile.Request]{ CreateFunc: func(_ context.Context, e event.TypedCreateEvent[*d8commonapi.ModuleConfig], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunMCWatcher] got a create event for the ModuleConfig %s", e.Object.GetName())) + log := log. + WithName("CreateFunc"). + WithValues("moduleConfigName", e.Object.GetName()) + log.Info("got a create event for the ModuleConfig") request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.Object.GetNamespace(), Name: e.Object.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunMCWatcher] added the ModuleConfig %s to the Reconcilers queue", e.Object.GetName())) + log.Info("added the ModuleConfig to the Reconcilers queue") }, UpdateFunc: func(_ context.Context, e event.TypedUpdateEvent[*d8commonapi.ModuleConfig], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { - log.Info(fmt.Sprintf("[RunMCWatcher] got a update event for the ModuleConfig %s", e.ObjectNew.GetName())) + log := log. + WithName("UpdateFunc"). + WithValues("moduleConfigName", e.ObjectNew.GetName()) + log.Info("got an update event for the ModuleConfig") request := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: e.ObjectNew.GetNamespace(), Name: e.ObjectNew.GetName()}} q.Add(request) - log.Info(fmt.Sprintf("[RunMCWatcher] added the ModuleConfig %s to the Reconcilers queue", e.ObjectNew.GetName())) + log.Info("added the ModuleConfig to the Reconcilers queue") }, })) if err != nil { - log.Error(err, "[RunMCWatcher] unable to watch the events") + log.Error(err, "unable to watch the events") return err } diff --git a/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher.go b/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher.go index b643584ad..8b58bf67d 100644 --- a/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher.go +++ b/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher.go @@ -19,7 +19,6 @@ package controller import ( "context" "errors" - "fmt" "strings" "time" @@ -51,74 +50,85 @@ func RunSdsInfraWatcher( metrics monitoring.Metrics, log logger.Logger, ) { - log.Info("[RunSdsInfraWatcher] starts the work") + log = log.WithName("RunSdsInfraWatcher") + log.Info("starts the work") cl := mgr.GetClient() go func() { for { time.Sleep(cfg.ScanIntervalSec) - log.Info("[RunSdsInfraWatcher] starts the reconciliation loop") + log.Info("starts the reconciliation loop") - log.Debug("[RunSdsInfraWatcher] tries to get LVMVolumeGroups") + log.Debug("tries to get LVMVolumeGroups") lvgs, err := GetLVMVolumeGroups(ctx, cl, metrics) if err != nil { - log.Error(err, "[RunSdsInfraWatcher] unable to get LVMVolumeGroups") + log.Error(err, "unable to get LVMVolumeGroups") continue } - log.Debug("[RunSdsInfraWatcher] successfully got LVMVolumeGroups") + log.Debug("successfully got LVMVolumeGroups") if len(lvgs) == 0 { - log.Info("[RunSdsInfraWatcher] no LVMVolumeGroups found") + log.Info("no LVMVolumeGroups found") continue } for _, lvg := range lvgs { - log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] LVMVolumeGroup %s conditions: %+v", 
lvg.Name, lvg.Status.Conditions)) + log.Trace("LVMVolumeGroup conditions", + "lvgName", lvg.Name, + "conditions", lvg.Status.Conditions) } - log.Info("[RunSdsInfraWatcher] LVMVolumeGroups found. Starts to check their health") - log.Info("[RunSdsInfraWatcher] check if every LVMVolumeGroup node does exist") + log.Info("LVMVolumeGroups found. Starts to check their health") + log.Info("check if every LVMVolumeGroup node does exist") lvgNodeNames := getNodeNamesFromLVGs(lvgs) - log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] used nodes %v", lvgNodeNames)) + log.Trace("used nodes", "nodes", lvgNodeNames) - log.Debug("[RunSdsInfraWatcher] tries to collect nodes used by LVMVolumeGroups") + log.Debug("tries to collect nodes used by LVMVolumeGroups") usedNodes, missedNodes, err := getNodesByNames(ctx, cl, lvgNodeNames) if err != nil { - log.Error(err, "[RunSdsInfraWatcher] unable to get nodes") + log.Error(err, "unable to get nodes") continue } - log.Debug("[RunSdsInfraWatcher] successfully collected nodes used by LVMVolumeGroups") + log.Debug("successfully collected nodes used by LVMVolumeGroups") if len(missedNodes) > 0 { - log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] some LVMVolumeGroups use missing nodes: %v. Turn those LVMVolumeGroups condition NodeReady to False", missedNodes)) + log.Warning("some LVMVolumeGroups use missing nodes. Turn those LVMVolumeGroups condition NodeReady to False", + "missedNodes", missedNodes) lvgsWithMissedNodes := findLVMVolumeGroupsByNodeNames(lvgs, missedNodes) for _, lvg := range lvgsWithMissedNodes { + log := log.WithValues( + "lvgName", lvg.Name, + "conditionType", nodeReadyType) err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, nodeReadyType, "MissingNode", "unable to find the used nodes") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s phase and condition %s due to missing nodes", lvg.Name, nodeReadyType)) + log.Info("successfully reconciled the LVMVolumeGroup phase and condition due to missing nodes") } } else { - log.Info("[RunSdsInfraWatcher] no missing nodes used by LVMVolumeGroups were found") + log.Info("no missing nodes used by LVMVolumeGroups were found") } - log.Debug("[RunSdsInfraWatcher] check if every used node is Ready") + log.Debug("check if every used node is Ready") notReadyNodes := getNotReadyNodes(usedNodes) if len(notReadyNodes) > 0 { - log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] some LVMVolumeGroup nodes are not Ready: %v. Turn those LVMVolumeGroups condition NodeReady to False", notReadyNodes)) + log.Warning("some LVMVolumeGroup nodes are not Ready. 
Turn those LVMVolumeGroups condition NodeReady to False", + "notReadyNodes", notReadyNodes) lvgsNotReady := findLVMVolumeGroupsByNodeNames(lvgs, notReadyNodes) for _, lvg := range lvgsNotReady { + log := log.WithValues( + "lvgName", lvg.Name, + "conditionType", nodeReadyType) err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, nodeReadyType, "NodeNotReady", "some of used nodes is not ready") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s phase and condition %s due to nodes are not in a Running state", lvg.Name, nodeReadyType)) + log.Info("successfully reconciled the LVMVolumeGroup phase and condition due to nodes are not in a Running state") } } else { - log.Info("[RunSdsInfraWatcher] every LVMVolumeGroup node is in a Running state") + log.Info("every LVMVolumeGroup node is in a Running state") } // Create a list of LVGs that are not in notReadyNodes and missedNodes @@ -140,8 +150,11 @@ func RunSdsInfraWatcher( firstName := lvg.Status.Nodes[0].Name for _, node := range lvg.Status.Nodes[1:] { if node.Name != firstName { - errMsg := fmt.Sprintf("[RunSdsInfraWatcher] found different node names in lvg.Status.Nodes for %s: %+v", lvg.Name, lvg.Status.Nodes) - log.Error(errors.New(errMsg), errMsg) + log.Error( + errors.New("found different node names in lvg.Status.Nodes"), + "found different node names in lvg.Status.Nodes", + "lvgName", lvg.Name, + "nodes", lvg.Status.Nodes) multipleNodes = true } } @@ -164,67 +177,79 @@ func RunSdsInfraWatcher( // Update status for LVGs that are not in problem nodes for _, lvg := range lvgsToUpdate { - log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] tries to update the LVMVolumeGroup %s condition %s to True", lvg.Name, nodeReadyType)) + log := log.WithValues( + "lvgName", lvg.Name, + "conditionType", nodeReadyType) + log.Debug("tries to update the LVMVolumeGroup condition to True") err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, nodeReadyType, "NodesReady", "selected nodes were found in the cluster and have Ready state") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s condition %s to status True", lvg.Name, nodeReadyType)) + log.Info("successfully reconciled the LVMVolumeGroup condition to status True") } - log.Info("[RunSdsInfraWatcher] check if every sds-node-configurator agent's pod is healthy") - log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] tries to get pods by the selector %v", sdsNodeConfiguratorSelector)) + log.Info("check if every sds-node-configurator agent's pod is healthy") + log.Debug("tries to get pods by the selector", + "selector", sdsNodeConfiguratorSelector) sdsPods, err := getPodsBySelector(ctx, cl, sdsNodeConfiguratorSelector) if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to get pods by the seletor %v", sdsNodeConfiguratorSelector)) + log.Error(err, "unable to get pods by the selector", + "selector", sdsNodeConfiguratorSelector) continue } - log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] successfully got pods by the selector %v", sdsNodeConfiguratorSelector)) + 
log.Debug("successfully got pods by the selector", + "selector", sdsNodeConfiguratorSelector) if len(sdsPods) == 0 { - log.Warning("[RunSdsInfraWatcher] no sds-node-configurator agent's pods found, update every LVMVolumeGroup condition AgentReady to False") + log.Warning("no sds-node-configurator agent's pods found, update every LVMVolumeGroup condition AgentReady to False") for _, lvg := range lvgs { - log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] tries to update the LVMVolumeGroup %s condition %s to status False due to a missing agent's pod", lvg.Name, agentReadyType)) + log := log.WithValues( + "lvgName", lvg.Name, + "conditionType", agentReadyType) + log.Debug("tries to update the LVMVolumeGroup condition to status False due to a missing agent's pod") err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, agentReadyType, "NoPod", "unable to find any agent's pod") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition %s to the LVMVolumeGroup %s", agentReadyType, lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s phase and condition %s due to missing pods", lvg.Name, agentReadyType)) + log.Info("successfully reconciled the LVMVolumeGroup phase and condition due to missing pods") } - log.Info("[RunSdsInfraWatcher] successfully updated every LVMVolumeGroup status.phase to NotReady due to no sds-node-configurator agent's pods are running") + log.Info("successfully updated every LVMVolumeGroup status.phase to NotReady due to no sds-node-configurator agent's pods are running") continue } - log.Debug("[RunSdsInfraWatcher] sds-node-configurator agent's pods were found. Check if some pods are missing") + log.Debug("sds-node-configurator agent's pods were found. Check if some pods are missing") for _, p := range sdsPods { - log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] found a pod: %s", p.Name)) + log.Trace("found a pod", "podName", p.Name) } unmanagedNodes := getNodeNamesWithoutAgent(usedNodes, sdsPods) if len(unmanagedNodes) > 0 { - log.Warning("[RunSdsInfraWatcher] some LVMVolumeGroups are not managed due to corresponding sds-node-configurator agent's pods are not running. Turn such LVMVolumeGroups to NotReady phase") - log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] nodes without the agent: %v", unmanagedNodes)) + log.Warning("some LVMVolumeGroups are not managed due to corresponding sds-node-configurator agent's pods are not running. 
Turn such LVMVolumeGroups to NotReady phase") + log.Trace("nodes without the agent", "unmanagedNodes", unmanagedNodes) lvgsWithoutPod := findLVMVolumeGroupsByNodeNames(lvgs, unmanagedNodes) for _, lvg := range lvgsWithoutPod { - log.Debug(fmt.Sprintf("[RunSdsInfraWatcher] tries to add a condition %s status False due the LVMVolumeGroup %s node is not managed by a sds-node-configurator agent's pod", agentReadyType, lvg.Name)) + log := log.WithValues( + "lvgName", lvg.Name, + "conditionType", agentReadyType) + log.Debug("tries to add a condition status False due the LVMVolumeGroup node is not managed by a sds-node-configurator agent's pod") err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, agentReadyType, "PodNotFound", "unable to find an agent's pod") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s condition %s due to missing pods", lvg.Name, agentReadyType)) + log.Info("successfully reconciled the LVMVolumeGroup condition due to missing pods") } } else { - log.Info("[RunSdsInfraWatcher] no missing sds-node-configurator agent's pods were found") + log.Info("no missing sds-node-configurator agent's pods were found") } - log.Debug("[RunSdsInfraWatcher] check if every agent's pod is in a Ready state") + log.Debug("check if every agent's pod is in a Ready state") notReadyPods := getNotReadyPods(sdsPods) if len(notReadyPods) > 0 { podsNames := make([]string, 0, len(notReadyPods)) @@ -232,19 +257,23 @@ func RunSdsInfraWatcher( podsNames = append(podsNames, name) } - log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] there is some sds-node-configurator agent's pods that is not Ready, pods: %s. Turn the LVMVolumeGroups condition AgentReady to False", strings.Join(podsNames, ","))) + log.Warning("there is some sds-node-configurator agent's pods that is not Ready. 
Turn the LVMVolumeGroups condition AgentReady to False", "pods", strings.Join(podsNames, ",")) nodeNames := getNodeNamesFromPods(notReadyPods) - log.Trace(fmt.Sprintf("[RunSdsInfraWatcher] node names with not Ready sds-node-configurator agent's pods: %v", nodeNames)) + log.Trace("node names with not Ready sds-node-configurator agent's pods", + "nodeNames", nodeNames) lvgsNotReady := findLVMVolumeGroupsByNodeNames(lvgs, nodeNames) for _, lvg := range lvgsNotReady { - log.Warning(fmt.Sprintf("[RunSdsInfraWatcher] the LVMVolumeGroup %s is managed by not Ready pod, turns the condition %s to False", lvg.Name, agentReadyType)) + log := log.WithValues( + "lvgName", lvg.Name, + "conditionType", agentReadyType) + log.Warning("the LVMVolumeGroup is managed by not Ready pod, turns the condition to False") err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionFalse, agentReadyType, "PodNotReady", "the pod is not Ready") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s phase and condition %s due to the pod is not Ready", lvg.Name, agentReadyType)) + log.Info("successfully reconciled the LVMVolumeGroup phase and condition due to the pod is not Ready") } } @@ -266,8 +295,11 @@ func RunSdsInfraWatcher( firstName := lvg.Status.Nodes[0].Name for _, node := range lvg.Status.Nodes[1:] { if node.Name != firstName { - errMsg := fmt.Sprintf("[RunSdsInfraWatcher] found different node names in lvg.Status.Nodes for %s: %+v", lvg.Name, lvg.Status.Nodes) - log.Error(errors.New(errMsg), errMsg) + log.Error( + errors.New("found different node names in lvg.Status.Nodes"), + "found different node names in lvg.Status.Nodes", + "lvgName", lvg.Name, + "nodes", lvg.Status.Nodes) multipleNodes = true } } @@ -295,14 +327,15 @@ func RunSdsInfraWatcher( // Update status for LVGs that have active pods if len(lvgsWithActivePods) > 0 { - log.Info("[RunSdsInfraWatcher] found LVGs with active sds-node-configurator agent's pods") + log.Info("found LVGs with active sds-node-configurator agent's pods") for _, lvg := range lvgsWithActivePods { + log := log.WithValues("lvgName", lvg.Name) err = updateLVGConditionIfNeeded(ctx, cl, log, &lvg, metav1.ConditionTrue, agentReadyType, "PodReady", "pod is ready to manage the resource") if err != nil { - log.Error(err, fmt.Sprintf("[RunSdsInfraWatcher] unable to add a condition to the LVMVolumeGroup %s", lvg.Name)) + log.Error(err, "unable to add a condition to the LVMVolumeGroup") continue } - log.Info(fmt.Sprintf("[RunSdsInfraWatcher] successfully reconciled the LVMVolumeGroup %s", lvg.Name)) + log.Info("successfully reconciled the LVMVolumeGroup") } } } diff --git a/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher_funcs.go b/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher_funcs.go index 7f70b81e4..c71bfe077 100644 --- a/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher_funcs.go +++ b/images/sds-health-watcher-controller/pkg/controller/sds_infra_watcher_funcs.go @@ -174,6 +174,11 @@ func GetLVMVolumeGroups(ctx context.Context, cl client.Client, metrics monitorin } func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logger.Logger, lvg *v1alpha1.LVMVolumeGroup, status metav1.ConditionStatus, conType, reason, message string) error { + log = log. 
+ WithName("updateLVGConditionIfNeeded"). + WithValues( + "lvgName", lvg.Name, + "conditionType", conType) exist := false index := 0 newCondition := metav1.Condition{ @@ -186,35 +191,37 @@ func updateLVGConditionIfNeeded(ctx context.Context, cl client.Client, log logge } if lvg.Status.Conditions == nil { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] the LVMVolumeGroup %s conditions is nil. Initialize them", lvg.Name)) + log.Debug("the LVMVolumeGroup conditions is nil. Initialize them") lvg.Status.Conditions = make([]metav1.Condition, 0, 2) } if len(lvg.Status.Conditions) > 0 { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] there are some conditions in the LVMVolumeGroup %s. Tries to find a condition %s", lvg.Name, conType)) + log.Debug("there are some conditions in the LVMVolumeGroup. Tries to find a condition") for i, c := range lvg.Status.Conditions { if c.Type == conType { - log.Trace(fmt.Sprintf("[updateLVGConditionIfNeeded] old condition: %+v, new condition: %+v", c, newCondition)) + log.Trace("old condition and new condition", + "oldCondition", c, + "newCondition", newCondition) if checkIfEqualConditions(c, newCondition) { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no need to update condition %s in the LVMVolumeGroup %s as new and old condition states are the same", conType, lvg.Name)) + log.Debug("no need to update condition in the LVMVolumeGroup as new and old condition states are the same") return nil } index = i exist = true - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was found in the LVMVolumeGroup %s at the index %d", conType, lvg.Name, i)) + log.Debug("a condition was found in the LVMVolumeGroup at the index", "index", i) } } if !exist { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] a condition %s was not found. Append it in the end of the LVMVolumeGroup %s conditions", conType, lvg.Name)) + log.Debug("a condition was not found. Append it in the end of the LVMVolumeGroup conditions") lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) } else { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] insert the condition %s at index %d of the LVMVolumeGroup %s conditions", conType, index, lvg.Name)) + log.Debug("insert the condition at index of the LVMVolumeGroup conditions", "index", index) lvg.Status.Conditions[index] = newCondition } } else { - log.Debug(fmt.Sprintf("[updateLVGConditionIfNeeded] no conditions were found in the LVMVolumeGroup %s. Append the condition %s in the end", lvg.Name, conType)) + log.Debug("no conditions were found in the LVMVolumeGroup. Append the condition in the end") lvg.Status.Conditions = append(lvg.Status.Conditions, newCondition) } diff --git a/images/sds-health-watcher-controller/pkg/logger/logger.go b/images/sds-health-watcher-controller/pkg/logger/logger.go index 34af69e01..5904da2e9 100644 --- a/images/sds-health-watcher-controller/pkg/logger/logger.go +++ b/images/sds-health-watcher-controller/pkg/logger/logger.go @@ -17,7 +17,6 @@ limitations under the License. package logger import ( - "fmt" "strconv" "github.com/go-logr/logr" @@ -65,25 +64,33 @@ func (l Logger) GetLogger() logr.Logger { } func (l Logger) Error(err error, message string, keysAndValues ...interface{}) { - l.log.Error(err, fmt.Sprintf("ERROR %s", message), keysAndValues...) + l.log.WithValues("level", "ERROR").Error(err, message, keysAndValues...) } func (l Logger) Warning(message string, keysAndValues ...interface{}) { - l.log.V(warnLvl).Info(fmt.Sprintf("WARNING %s", message), keysAndValues...) 
diff --git a/images/webhooks/go.mod b/images/webhooks/go.mod
index db64ecaf6..efcedc493 100644
--- a/images/webhooks/go.mod
+++ b/images/webhooks/go.mod
@@ -16,7 +16,7 @@ replace github.com/deckhouse/sds-node-configurator/lib/go/common => ../../lib/go
 require (
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kr/text v0.2.0 // indirect
diff --git a/images/webhooks/go.sum b/images/webhooks/go.sum
index b3a6fedc9..ae96d8a23 100644
--- a/images/webhooks/go.sum
+++ b/images/webhooks/go.sum
@@ -5,8 +5,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=