From dbd366061009d36d7e137e05b39d99ac6aea56c7 Mon Sep 17 00:00:00 2001 From: Toby Archer Date: Mon, 29 Sep 2025 01:31:20 +0200 Subject: [PATCH 1/2] Begin upgrade to v1beta2 --- .golangci.yml | 8 +- Makefile | 12 +- api/v1beta1/awscluster_types.go | 18 +- api/v1beta1/awsclustertemplate_types.go | 2 +- api/v1beta1/awsmachine_types.go | 2 +- api/v1beta1/awsmachinetemplate_types.go | 2 +- api/v1beta1/conditions_consts.go | 2 +- api/v1beta1/tags.go | 2 +- api/v1beta1/types.go | 2 +- api/v1beta1/zz_generated.conversion.go | 59 +- api/v1beta1/zz_generated.deepcopy.go | 22 +- api/v1beta2/awscluster_types.go | 16 +- api/v1beta2/awscluster_webhook.go | 2 +- api/v1beta2/awscluster_webhook_test.go | 2 +- api/v1beta2/awsclustertemplate_types.go | 2 +- api/v1beta2/awsmachine_types.go | 46 +- api/v1beta2/awsmachinetemplate_types.go | 2 +- api/v1beta2/awsmanagedcluster_types.go | 12 +- api/v1beta2/conditions_consts.go | 38 +- api/v1beta2/tags.go | 2 +- api/v1beta2/types.go | 2 +- api/v1beta2/zz_generated.deepcopy.go | 34 +- bootstrap/eks/api/v1beta1/condition_consts.go | 2 +- bootstrap/eks/api/v1beta1/eksconfig_types.go | 2 +- .../api/v1beta1/zz_generated.conversion.go | 42 +- .../eks/api/v1beta1/zz_generated.deepcopy.go | 4 +- bootstrap/eks/api/v1beta2/condition_consts.go | 4 +- bootstrap/eks/api/v1beta2/eksconfig_types.go | 30 +- .../eks/api/v1beta2/zz_generated.deepcopy.go | 20 +- .../eks/controllers/eksconfig_controller.go | 94 ++- .../eksconfig_controller_reconciler_test.go | 43 +- .../controllers/eksconfig_controller_test.go | 11 +- cmd/clusterawsadm/gc/gc.go | 2 +- cmd/clusterawsadm/gc/gc_test.go | 27 +- ...bootstrap.cluster.x-k8s.io_eksconfigs.yaml | 74 +- ...ster.x-k8s.io_awsmanagedcontrolplanes.yaml | 182 +++-- ...8s.io_awsmanagedcontrolplanetemplates.yaml | 7 +- ...ne.cluster.x-k8s.io_rosacontrolplanes.yaml | 91 +-- ...tructure.cluster.x-k8s.io_awsclusters.yaml | 152 ++-- ....cluster.x-k8s.io_awsclustertemplates.yaml | 14 +- ...e.cluster.x-k8s.io_awsfargateprofiles.yaml | 91 +-- ...ture.cluster.x-k8s.io_awsmachinepools.yaml | 133 ++-- ...tructure.cluster.x-k8s.io_awsmachines.yaml | 91 +-- ...e.cluster.x-k8s.io_awsmanagedclusters.yaml | 77 +- ...r.x-k8s.io_awsmanagedclustertemplates.yaml | 7 +- ...uster.x-k8s.io_awsmanagedmachinepools.yaml | 53 +- ...ructure.cluster.x-k8s.io_rosaclusters.yaml | 77 +- ...ure.cluster.x-k8s.io_rosamachinepools.yaml | 64 +- controllers/awscluster_controller.go | 84 +- controllers/awscluster_controller_test.go | 57 +- .../awscluster_controller_unit_test.go | 15 +- controllers/awsmachine_controller.go | 222 +++++- controllers/awsmachine_controller_test.go | 36 +- .../awsmachine_controller_unit_test.go | 204 ++--- controllers/awsmanagedcluster_controller.go | 8 +- controllers/helpers_test.go | 6 +- controllers/rosacluster_controller.go | 30 +- controllers/rosacluster_controller_test.go | 48 +- controllers/suite_test.go | 6 +- .../v1beta1/awsmanagedcontrolplane_types.go | 11 +- .../eks/api/v1beta1/conditions_consts.go | 16 +- .../api/v1beta1/zz_generated.conversion.go | 29 +- .../eks/api/v1beta1/zz_generated.deepcopy.go | 11 +- .../v1beta2/awsmanagedcontrolplane_types.go | 50 +- .../eks/api/v1beta2/conditions_consts.go | 16 +- .../eks/api/v1beta2/zz_generated.deepcopy.go | 32 +- .../awsmanagedcontrolplane_controller.go | 81 +- .../awsmanagedcontrolplane_controller_test.go | 4 +- controlplane/eks/controllers/helpers_test.go | 2 +- controlplane/eks/controllers/suite_test.go | 2 +- .../rosa/api/v1beta2/conditions_consts.go | 10 +- .../api/v1beta2/rosacontrolplane_types.go 
| 38 +- .../rosa/api/v1beta2/zz_generated.deepcopy.go | 25 +- .../rosacontrolplane_controller.go | 162 ++-- .../rosacontrolplane_controller_test.go | 4 +- controlplane/rosa/controllers/suite_test.go | 2 +- exp/api/v1beta1/awsfargateprofile_types.go | 2 +- exp/api/v1beta1/awsmachinepool_types.go | 2 +- .../v1beta1/awsmanagedmachinepool_types.go | 2 +- exp/api/v1beta1/conditions_consts.go | 2 +- exp/api/v1beta1/zz_generated.conversion.go | 101 ++- exp/api/v1beta1/zz_generated.deepcopy.go | 8 +- exp/api/v1beta2/awsfargateprofile_types.go | 47 +- exp/api/v1beta2/awsfargateprofile_webhook.go | 2 +- .../v1beta2/awsfargateprofile_webhook_test.go | 2 +- exp/api/v1beta2/awsmachinepool_types.go | 82 +- .../v1beta2/awsmanagedmachinepool_types.go | 9 +- exp/api/v1beta2/conditions_consts.go | 28 +- exp/api/v1beta2/rosacluster_types.go | 14 +- exp/api/v1beta2/rosamachinepool_types.go | 21 +- exp/api/v1beta2/zz_generated.deepcopy.go | 74 +- exp/controlleridentitycreator/suite_test.go | 2 +- exp/controllers/awsfargatepool_controller.go | 25 +- exp/controllers/awsmachinepool_controller.go | 115 ++- .../awsmachinepool_controller_test.go | 69 +- exp/controllers/awsmachinepool_machines.go | 12 +- .../awsmanagedmachinepool_controller.go | 38 +- exp/controllers/rosamachinepool_controller.go | 13 +- .../rosamachinepool_controller_test.go | 29 +- exp/controllers/suite_test.go | 4 +- exp/instancestate/suite_test.go | 4 +- go.mod | 127 ++- go.sum | 281 ++++--- main.go | 4 +- pkg/cloud/interfaces.go | 5 +- pkg/cloud/scope/cluster.go | 52 +- pkg/cloud/scope/elb.go | 2 +- pkg/cloud/scope/fargate.go | 24 +- pkg/cloud/scope/launchtemplate.go | 4 +- pkg/cloud/scope/machine.go | 62 +- pkg/cloud/scope/machine_test.go | 2 +- pkg/cloud/scope/machinepool.go | 32 +- pkg/cloud/scope/managedcontrolplane.go | 23 +- pkg/cloud/scope/managednodegroup.go | 50 +- pkg/cloud/scope/rosacontrolplane.go | 4 +- pkg/cloud/scope/rosamachinepool.go | 30 +- pkg/cloud/scope/session.go | 45 +- pkg/cloud/scope/session_test.go | 2 +- pkg/cloud/scope/shared.go | 8 +- .../autoscaling/autoscalinggroup_test.go | 6 +- .../services/autoscaling/lifecyclehook.go | 4 +- pkg/cloud/services/ec2/bastion.go | 34 +- pkg/cloud/services/ec2/bastion_test.go | 2 +- pkg/cloud/services/ec2/helper_test.go | 11 +- pkg/cloud/services/ec2/instances.go | 33 +- pkg/cloud/services/ec2/instances_test.go | 2 +- pkg/cloud/services/ec2/launchtemplate.go | 98 ++- pkg/cloud/services/ec2/launchtemplate_test.go | 2 +- pkg/cloud/services/eks/cluster.go | 54 +- pkg/cloud/services/eks/cluster_test.go | 2 +- pkg/cloud/services/eks/config.go | 2 +- pkg/cloud/services/eks/config_test.go | 2 +- pkg/cloud/services/eks/eks.go | 4 +- pkg/cloud/services/eks/fargate.go | 4 +- pkg/cloud/services/eks/nodegroup.go | 2 +- pkg/cloud/services/eks/oidc_test.go | 2 +- pkg/cloud/services/eks/roles.go | 2 +- pkg/cloud/services/elb/loadbalancer.go | 53 +- pkg/cloud/services/elb/loadbalancer_test.go | 8 +- pkg/cloud/services/gc/cleanup_test.go | 2 +- pkg/cloud/services/iamauth/reconcile.go | 5 +- pkg/cloud/services/iamauth/reconcile_test.go | 9 +- pkg/cloud/services/iamauth/suite_test.go | 5 +- .../services/instancestate/helpers_test.go | 2 +- .../gomock_reflect_1375320870/prog.go | 66 ++ pkg/cloud/services/network/carriergateways.go | 6 +- .../services/network/carriergateways_test.go | 2 +- .../services/network/egress_only_gateways.go | 6 +- .../network/egress_only_gateways_test.go | 2 +- pkg/cloud/services/network/eips_test.go | 2 +- pkg/cloud/services/network/gateways.go | 6 +- 
pkg/cloud/services/network/gateways_test.go | 2 +- pkg/cloud/services/network/natgateways.go | 37 +- .../services/network/natgateways_test.go | 2 +- pkg/cloud/services/network/network.go | 289 +++++-- pkg/cloud/services/network/routetables.go | 6 +- .../services/network/routetables_test.go | 2 +- .../services/network/secondarycidr_test.go | 4 +- pkg/cloud/services/network/subnets.go | 6 +- pkg/cloud/services/network/subnets_test.go | 2 +- pkg/cloud/services/network/vpc.go | 8 +- pkg/cloud/services/network/vpc_test.go | 2 +- .../gomock_reflect_2183343406/prog.go | 66 ++ pkg/cloud/services/s3/s3_test.go | 2 +- .../services/secretsmanager/secret_test.go | 2 +- .../services/securitygroup/securitygroups.go | 41 +- .../securitygroup/securitygroups_test.go | 2 +- pkg/cloud/services/ssm/secret_test.go | 2 +- pkg/utils/utils.go | 9 +- test/e2e/shared/common.go | 2 +- test/e2e/shared/gpu.go | 2 +- test/e2e/shared/workload.go | 2 +- .../suites/managed/control_plane_helpers.go | 2 +- test/e2e/suites/managed/machine_deployment.go | 2 +- .../managed/machine_deployment_helpers.go | 2 +- .../suites/managed/machine_pool_helpers.go | 8 +- test/e2e/suites/managed/managed_suite_test.go | 4 +- test/e2e/suites/unmanaged/helpers_test.go | 10 +- .../unmanaged_classic_elb_upgrade_test.go | 2 +- .../unmanaged_functional_clusterclass_test.go | 2 +- .../unmanaged/unmanaged_functional_test.go | 6 +- test/helpers/envtest.go | 2 +- test/mocks/capa_clusterscoper_mock.go | 4 +- util/clusterapiv1beta1/util.go | 753 ++++++++++++++++++ util/conditions/helper.go | 4 +- util/paused/paused.go | 48 +- util/paused/paused_test.go | 5 +- 187 files changed, 3915 insertions(+), 2227 deletions(-) create mode 100644 pkg/cloud/services/mock_services/gomock_reflect_1375320870/prog.go create mode 100644 pkg/cloud/services/s3/mock_s3iface/gomock_reflect_2183343406/prog.go create mode 100644 util/clusterapiv1beta1/util.go diff --git a/.golangci.yml b/.golangci.yml index d87ebbdf39..5f4f1f1df1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -131,6 +131,8 @@ linters: alias: eksbootstrapcontrollers - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/controllers alias: ekscontrolplanecontrollers + - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/util/clusterapiv1beta1 + alias: clusterapiv1beta1util - pkg: sigs.k8s.io/cluster-api-provider-aws/v2/exp/controllers alias: expcontrollers - pkg: k8s.io/apimachinery/pkg/runtime @@ -139,7 +141,7 @@ linters: alias: runtimeserializer - pkg: k8s.io/apimachinery/pkg/runtime/serializer/yaml alias: yamlserializer - - pkg: sigs.k8s.io/cluster-api/api/v1beta1 + - pkg: sigs.k8s.io/cluster-api/api/core/v1beta1 alias: clusterv1 - pkg: sigs.k8s.io/cluster-api/util/defaulting alias: utildefaulting @@ -169,8 +171,8 @@ linters: alias: crclient - pkg: k8s.io/apimachinery/pkg/types alias: apimachinerytypes - - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 - alias: expclusterv1 + - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta2 + alias: clusterv1 no-unaliased: false nolintlint: require-specific: true diff --git a/Makefile b/Makefile index b820793ba8..7a06bbb3cc 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,7 @@ endif .PHONY: defaulters defaulters: $(DEFAULTER_GEN) ## Generate all Go types $(DEFAULTER_GEN) \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta1 \ --v=0 \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ --output-file=zz_generated.defaults.go \ @@ -262,7 +262,7 @@ generate-go-apis: ## Alias for 
.build/generate-go-apis $(MAKE) defaulters $(CONVERSION_GEN) \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta1 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./api/v1beta1 \ @@ -270,28 +270,28 @@ generate-go-apis: ## Alias for .build/generate-go-apis $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta1 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./$(EXP_DIR)/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta1 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./bootstrap/eks/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta1 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./controlplane/eks/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta1 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./controlplane/rosa/api/v1beta2 diff --git a/api/v1beta1/awscluster_types.go b/api/v1beta1/awscluster_types.go index ddb1d2cd5a..9db82f74d3 100644 --- a/api/v1beta1/awscluster_types.go +++ b/api/v1beta1/awscluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -200,11 +200,11 @@ type AWSLoadBalancerSpec struct { // AWSClusterStatus defines the observed state of AWSCluster. type AWSClusterStatus struct { // +kubebuilder:default=false - Ready bool `json:"ready"` - Network NetworkStatus `json:"networkStatus,omitempty"` - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Bastion *Instance `json:"bastion,omitempty"` - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains []clusterv1.FailureDomain `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. @@ -254,12 +254,12 @@ type AWSClusterList struct { } // GetConditions returns the observations of the operational state of the AWSCluster resource. -func (r *AWSCluster) GetConditions() clusterv1.Conditions { +func (r *AWSCluster) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. 
-func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSCluster to the predescribed []metav1.Condition. +func (r *AWSCluster) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/awsclustertemplate_types.go b/api/v1beta1/awsclustertemplate_types.go index 07e2cf4039..622e6894c4 100644 --- a/api/v1beta1/awsclustertemplate_types.go +++ b/api/v1beta1/awsclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. diff --git a/api/v1beta1/awsmachine_types.go b/api/v1beta1/awsmachine_types.go index 25a8cb4dcd..decc71c7f0 100644 --- a/api/v1beta1/awsmachine_types.go +++ b/api/v1beta1/awsmachine_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/api/v1beta1/awsmachinetemplate_types.go b/api/v1beta1/awsmachinetemplate_types.go index 6e86295c6b..00fa64fe80 100644 --- a/api/v1beta1/awsmachinetemplate_types.go +++ b/api/v1beta1/awsmachinetemplate_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go index ae5d761df1..c444705f68 100644 --- a/api/v1beta1/conditions_consts.go +++ b/api/v1beta1/conditions_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. diff --git a/api/v1beta1/tags.go b/api/v1beta1/tags.go index a727d39cf4..de54e1d4c0 100644 --- a/api/v1beta1/tags.go +++ b/api/v1beta1/tags.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // Tags defines a map of tags. diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index fe6510380b..596c65a329 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -19,7 +19,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSResourceReference is a reference to a specific AWS resource by ID or filters. 
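NOTE: the regenerated conversion files that follow stub out every Condition conversion with compileErrorOnMissingConversion(), so a hand-written helper will be needed. A minimal sketch of one direction, assuming the generated file's import aliases (v1 for k8s.io/apimachinery/pkg/apis/meta/v1, corev1beta1 for sigs.k8s.io/cluster-api/api/core/v1beta1) and a placeholder "Unknown" reason; the function name merely mimics the generated naming scheme:

func Convert_corev1beta1_Condition_To_v1_Condition(in *corev1beta1.Condition, out *v1.Condition, _ conversion.Scope) error {
	out.Type = string(in.Type)
	out.Status = v1.ConditionStatus(in.Status)
	out.LastTransitionTime = in.LastTransitionTime
	// v1.Condition requires a non-empty Reason, while v1beta1 conditions may omit it.
	out.Reason = in.Reason
	if out.Reason == "" {
		out.Reason = "Unknown"
	}
	out.Message = in.Message
	// Severity exists only in v1beta1 and is dropped; ObservedGeneration exists only in v1 and is left zero.
	return nil
}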
diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 9c7a33e9fb..ca68b8ae2a 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -25,11 +25,13 @@ import ( time "time" unsafe "unsafe" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func init() { @@ -1036,7 +1038,7 @@ func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClu if err := Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(&in.Network, &out.Network, s); err != nil { return err } - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*[]corev1beta2.FailureDomain)(unsafe.Pointer(&in.FailureDomains)) if in.Bastion != nil { in, out := &in.Bastion, &out.Bastion *out = new(v1beta2.Instance) @@ -1046,7 +1048,7 @@ func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClu } else { out.Bastion = nil } - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1060,7 +1062,7 @@ func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta if err := Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil { return err } - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*[]corev1beta2.FailureDomain)(unsafe.Pointer(&in.FailureDomains)) if in.Bastion != nil { in, out := &in.Bastion, &out.Bastion *out = new(Instance) @@ -1070,7 +1072,7 @@ func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta } else { out.Bastion = nil } - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1459,27 +1461,38 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW func autoConvert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachineStatus, out *v1beta2.AWSMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Interruptible = in.Interruptible - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta2.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*v1beta2.InstanceState)(unsafe.Pointer(in.InstanceState)) - out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type + // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert corev1beta1.Condition to v1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } -// 
Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus is an autogenerated conversion function. -func Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachineStatus, out *v1beta2.AWSMachineStatus, s conversion.Scope) error { - return autoConvert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in, out, s) -} - func autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Interruptible = in.Interruptible - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) - out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(corev1beta1.Conditions, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert v1.Condition to corev1beta1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } @@ -1617,7 +1630,7 @@ func Convert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(in } func autoConvert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatus(in *AWSMachineTemplateStatus, out *v1beta2.AWSMachineTemplateStatus, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity)) return nil } @@ -1627,7 +1640,7 @@ func Convert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatu } func autoConvert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in *v1beta2.AWSMachineTemplateStatus, out *AWSMachineTemplateStatus, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Capacity = *(*corev1.ResourceList)(unsafe.Pointer(&in.Capacity)) return nil } @@ -2009,7 +2022,7 @@ func autoConvert_v1beta1_Instance_To_v1beta2_Instance(in *Instance, out *v1beta2 out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) out.UserData = (*string)(unsafe.Pointer(in.UserData)) out.IAMProfile = in.IAMProfile - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta2.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP)) out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP)) out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport)) @@ -2040,7 +2053,7 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) out.UserData = (*string)(unsafe.Pointer(in.UserData)) out.IAMProfile = in.IAMProfile - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP)) out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP)) out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport)) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index b3f9c154cf..f195259615 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ 
b/api/v1beta1/zz_generated.deepcopy.go @@ -21,9 +21,11 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -409,9 +411,9 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]v1beta2.FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Bastion != nil { @@ -421,7 +423,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -741,7 +743,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.InstanceState != nil { @@ -761,7 +763,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -875,7 +877,7 @@ func (in *AWSMachineTemplateStatus) DeepCopyInto(out *AWSMachineTemplateStatus) *out = *in if in.Capacity != nil { in, out := &in.Capacity, &out.Capacity - *out = make(v1.ResourceList, len(*in)) + *out = make(corev1.ResourceList, len(*in)) for key, val := range *in { (*out)[key] = val.DeepCopy() } @@ -1305,7 +1307,7 @@ func (in *Instance) DeepCopyInto(out *Instance) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.PrivateIP != nil { diff --git a/api/v1beta2/awscluster_types.go b/api/v1beta2/awscluster_types.go index 213ad99c56..48fa309aed 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -276,11 +276,11 @@ type AdditionalListenerSpec struct { // AWSClusterStatus defines the observed state of AWSCluster. 
type AWSClusterStatus struct { // +kubebuilder:default=false - Ready bool `json:"ready"` - Network NetworkStatus `json:"networkStatus,omitempty"` - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Bastion *Instance `json:"bastion,omitempty"` - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains []clusterv1.FailureDomain `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. @@ -346,12 +346,12 @@ type AWSClusterList struct { } // GetConditions returns the observations of the operational state of the AWSCluster resource. -func (r *AWSCluster) GetConditions() clusterv1.Conditions { +func (r *AWSCluster) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. -func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSCluster to the predescribed []metav1.Condition. +func (r *AWSCluster) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index ec4fac40af..525e3157c3 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" ) diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index ad1b22d5fb..04f2e89cdd 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestAWSClusterDefault(t *testing.T) { diff --git a/api/v1beta2/awsclustertemplate_types.go b/api/v1beta2/awsclustertemplate_types.go index e0a827fa3d..e51e8b583d 100644 --- a/api/v1beta2/awsclustertemplate_types.go +++ b/api/v1beta2/awsclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go index 7031bdbaae..82bfbbc501 100644 --- a/api/v1beta2/awsmachine_types.go +++ b/api/v1beta2/awsmachine_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -397,47 +397,9 @@ type AWSMachineStatus struct { // +optional InstanceState *InstanceState `json:"instanceState,omitempty"` - // FailureReason will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a succinct value suitable - // for machine interpretation.
- // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - FailureReason *string `json:"failureReason,omitempty"` - - // FailureMessage will be set in the event that there is a terminal problem - // reconciling the Machine and will contain a more verbose string suitable - // for logging and human consumption. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - FailureMessage *string `json:"failureMessage,omitempty"` - // Conditions defines current service state of the AWSMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -461,12 +423,12 @@ type AWSMachine struct { } // GetConditions returns the observations of the operational state of the AWSMachine resource. -func (r *AWSMachine) GetConditions() clusterv1.Conditions { +func (r *AWSMachine) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1.Conditions. -func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachine to the predescribed []metav1.Condition. +func (r *AWSMachine) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/awsmachinetemplate_types.go b/api/v1beta2/awsmachinetemplate_types.go index 50d8dda22d..9f6e2ee774 100644 --- a/api/v1beta2/awsmachinetemplate_types.go +++ b/api/v1beta2/awsmachinetemplate_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
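NOTE: with FailureReason/FailureMessage gone from AWSMachineStatus above, terminal problems have to be surfaced as conditions instead. A minimal sketch of the replacement pattern, assuming infrav1 aliases this API package and conditions is sigs.k8s.io/cluster-api/util/conditions (the same call shape this patch uses in the EKS bootstrap controller); the reason and message strings are illustrative only:

// Previously: awsMachine.Status.FailureReason / FailureMessage were set on terminal errors.
conditions.Set(awsMachine, metav1.Condition{
	Type:    infrav1.InstanceReadyCondition,
	Status:  metav1.ConditionFalse,
	Reason:  "InstanceTerminated", // illustrative; use the matching *Reason constant from conditions_consts.go
	Message: "EC2 instance was terminated out of band",
})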
diff --git a/api/v1beta2/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go index 67d9b2fc92..c7cba3b093 100644 --- a/api/v1beta2/awsmanagedcluster_types.go +++ b/api/v1beta2/awsmanagedcluster_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // AWSManagedClusterSpec defines the desired state of AWSManagedCluster @@ -37,11 +37,11 @@ type AWSManagedClusterStatus struct { // FailureDomains specifies a list of available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains []clusterv1.FailureDomain `json:"failureDomains,omitempty"` // Conditions defines current service state of the AWSManagedCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -76,12 +76,12 @@ func init() { // GetConditions returns the observations of the operational state of the // AWSManagedCluster resource. -func (r *AWSManagedCluster) GetConditions() clusterv1.Conditions { +func (r *AWSManagedCluster) GetConditions() []metav1.Condition { return r.Status.Conditions } // SetConditions sets the underlying service state of the AWSManagedCluster to -// the predescribed clusterv1.Conditions. -func (r *AWSManagedCluster) SetConditions(conditions clusterv1.Conditions) { +// the predescribed []metav1.Condition. +func (r *AWSManagedCluster) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index 604ef8e1d5..583772d76e 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -16,19 +16,17 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. - PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved" + PrincipalCredentialRetrievedCondition = "PrincipalCredentialRetrieved" // PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval. PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed" // CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval. //nolint:gosec CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed" // PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace. - PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed" + PrincipalUsageAllowedCondition = "PrincipalUsageAllowed" // PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list. PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized" // SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces @@ -38,7 +36,7 @@ const ( // VpcReadyCondition reports on the successful reconciliation of a VPC.
- VpcReadyCondition clusterv1.ConditionType = "VpcReady" + VpcReadyCondition = "VpcReady" // VpcCreationStartedReason used when attempting to create a VPC for a managed cluster. // Will not be applied to unmanaged clusters. VpcCreationStartedReason = "VpcCreationStarted" @@ -48,7 +46,7 @@ const ( const ( // SubnetsReadyCondition reports on the successful reconciliation of subnets. - SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady" + SubnetsReadyCondition = "SubnetsReady" // SubnetsReconciliationFailedReason used to report failures while reconciling subnets. SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed" ) @@ -56,7 +54,7 @@ const ( const ( // InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways. // Only applicable to managed clusters. - InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady" + InternetGatewayReadyCondition = "InternetGatewayReady" // InternetGatewayFailedReason used when errors occur during internet gateway reconciliation. InternetGatewayFailedReason = "InternetGatewayFailed" ) @@ -64,7 +62,7 @@ const ( const ( // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways. // Only applicable to managed clusters. - EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady" + EgressOnlyInternetGatewayReadyCondition = "EgressOnlyInternetGatewayReady" // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation. EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed" ) @@ -72,7 +70,7 @@ const ( const ( // CarrierGatewayReadyCondition reports on the successful reconciliation of carrier gateways. // Only applicable to managed clusters. - CarrierGatewayReadyCondition clusterv1.ConditionType = "CarrierGatewayReady" + CarrierGatewayReadyCondition = "CarrierGatewayReady" // CarrierGatewayFailedReason used when errors occur during carrier gateway reconciliation. CarrierGatewayFailedReason = "CarrierGatewayFailed" ) @@ -80,7 +78,7 @@ const ( const ( // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways. // Only applicable to managed clusters. - NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady" + NatGatewaysReadyCondition = "NatGatewaysReady" // NatGatewaysCreationStartedReason set once when creating new NAT gateways. NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted" // NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways. @@ -90,7 +88,7 @@ const ( const ( // RouteTablesReadyCondition reports successful reconciliation of route tables. // Only applicable to managed clusters. - RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady" + RouteTablesReadyCondition = "RouteTablesReady" // RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables. RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed" ) @@ -98,7 +96,7 @@ const ( const ( // VpcEndpointsReadyCondition reports successful reconciliation of vpc endpoints. // Only applicable to managed clusters. - VpcEndpointsReadyCondition clusterv1.ConditionType = "VpcEndpointsReadyCondition" + VpcEndpointsReadyCondition = "VpcEndpointsReady" // VpcEndpointsReconciliationFailedReason used when any errors occur during reconciliation of vpc endpoints. 
VpcEndpointsReconciliationFailedReason = "VpcEndpointsReconciliationFailed" ) @@ -106,14 +104,14 @@ const ( // SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks. // Only applicable to managed clusters. - SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady" + SecondaryCidrsReadyCondition = "SecondaryCidrsReady" // SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks. SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed" ) const ( // ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups. - ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady" + ClusterSecurityGroupsReadyCondition = "ClusterSecurityGroupsReady" // ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups. ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed" ) @@ -121,7 +119,7 @@ const ( // BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster // may not require a bastion host and this condition will be skipped. - BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady" + BastionHostReadyCondition = "BastionHostReady" // BastionCreationStartedReason used when creating a new bastion host. BastionCreationStartedReason = "BastionCreationStarted" // BastionHostFailedReason used when an error occurs during the creation of a bastion host. @@ -130,7 +128,7 @@ const ( // LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled. - LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition = "LoadBalancerReady" // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. WaitForDNSNameReason = "WaitForDNSName" // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed @@ -144,7 +142,7 @@ const ( // InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state. - InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + InstanceReadyCondition = "InstanceReady" // InstanceNotFoundReason used when the instance couldn't be retrieved. InstanceNotFoundReason = "InstanceNotFound" @@ -166,7 +164,7 @@ const ( // SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine. - SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady" + SecurityGroupsReadyCondition = "SecurityGroupsReady" // SecurityGroupsFailedReason used when the security groups could not be synced. SecurityGroupsFailedReason = "SecurityGroupsSyncFailed" @@ -177,7 +175,7 @@ const ( // When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ. // Note this is only applicable to control plane machines. - ELBAttachedCondition clusterv1.ConditionType = "ELBAttached" + ELBAttachedCondition = "ELBAttached" // ELBAttachFailedReason used when a control plane node fails to attach to the ELB. ELBAttachFailedReason = "ELBAttachFailed" @@ -187,7 +185,7 @@ const ( // S3BucketReadyCondition indicates an S3 bucket has been created successfully.
- S3BucketReadyCondition clusterv1.ConditionType = "S3BucketCreated" + S3BucketReadyCondition = "S3BucketCreated" // S3BucketFailedReason is used when any errors occur during reconciliation of an S3 bucket. S3BucketFailedReason = "S3BucketCreationFailed" diff --git a/api/v1beta2/tags.go b/api/v1beta2/tags.go index 45bc371a49..344434f4a5 100644 --- a/api/v1beta2/tags.go +++ b/api/v1beta2/tags.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // Tags defines a map of tags. diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index c268165c10..4d173ef62e 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 197cffba66..5334fd05e6 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -415,9 +415,9 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]corev1beta2.FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Bastion != nil { @@ -427,7 +427,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -799,7 +799,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]v1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta2.MachineAddress, len(*in)) copy(*out, *in) } if in.InstanceState != nil { @@ -807,19 +807,9 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { *out = new(InstanceState) **out = **in } - if in.FailureReason != nil { - in, out := &in.FailureReason, &out.FailureReason - *out = new(string) - **out = **in - } - if in.FailureMessage != nil { - in, out := &in.FailureMessage, &out.FailureMessage - *out = new(string) - **out = **in - } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1030,14 +1020,14 @@ func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) { *out = *in if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]corev1beta2.FailureDomain, len(*in)) + for i := range *in { 
+ (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1649,7 +1639,7 @@ func (in *Instance) DeepCopyInto(out *Instance) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]v1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta2.MachineAddress, len(*in)) copy(*out, *in) } if in.PrivateIP != nil { diff --git a/bootstrap/eks/api/v1beta1/condition_consts.go b/bootstrap/eks/api/v1beta1/condition_consts.go index 86ef328727..44ccbe0d85 100644 --- a/bootstrap/eks/api/v1beta1/condition_consts.go +++ b/bootstrap/eks/api/v1beta1/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go diff --git a/bootstrap/eks/api/v1beta1/eksconfig_types.go b/bootstrap/eks/api/v1beta1/eksconfig_types.go index d268722878..8958e3a64a 100644 --- a/bootstrap/eks/api/v1beta1/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta1/eksconfig_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. diff --git a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go index eba4f6f7ce..1367cb9d77 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go @@ -24,10 +24,11 @@ package v1beta1 import ( unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -248,33 +249,40 @@ func autoConvert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(in *v1beta2.EKSC func autoConvert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in *EKSConfigStatus, out *v1beta2.EKSConfigStatus, s conversion.Scope) error { out.Ready = in.Ready out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) - out.FailureReason = in.FailureReason - out.FailureMessage = in.FailureMessage + // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type + // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert corev1beta1.Condition to v1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } -// Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus is an autogenerated conversion function. 
-func Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in *EKSConfigStatus, out *v1beta2.EKSConfigStatus, s conversion.Scope) error { - return autoConvert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in, out, s) -} - func autoConvert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error { out.Ready = in.Ready out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) - out.FailureReason = in.FailureReason - out.FailureMessage = in.FailureMessage + // WARNING: in.Initialization requires manual conversion: does not exist in peer-type out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(corev1beta1.Conditions, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert v1.Condition to corev1beta1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } -// Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus is an autogenerated conversion function. -func Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2.EKSConfigStatus, out *EKSConfigStatus, s conversion.Scope) error { - return autoConvert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in, out, s) -} - func autoConvert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta2.EKSConfigTemplate, s conversion.Scope) error { out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta diff --git a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go index 131707fac7..b09343ed28 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -151,7 +151,7 @@ func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/bootstrap/eks/api/v1beta2/condition_consts.go b/bootstrap/eks/api/v1beta2/condition_consts.go index e12213c840..942789425f 100644 --- a/bootstrap/eks/api/v1beta2/condition_consts.go +++ b/bootstrap/eks/api/v1beta2/condition_consts.go @@ -16,8 +16,6 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go @@ -27,7 +25,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. 
- DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition = "DataSecretAvailable" // DataSecretGenerationFailedReason (Severity=Warning) documents an EKSConfig controller detecting // an error while generating a data secret; those kinds of errors are usually due to misconfigurations diff --git a/bootstrap/eks/api/v1beta2/eksconfig_types.go b/bootstrap/eks/api/v1beta2/eksconfig_types.go index a2fce8e2cb..3b6db78f9b 100644 --- a/bootstrap/eks/api/v1beta2/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta2/eksconfig_types.go @@ -18,8 +18,6 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. @@ -84,6 +82,15 @@ type PauseContainer struct { Version string `json:"version"` } +// EKSConfigInitializationStatus provides observations of the EKSConfig initialization process. +type EKSConfigInitializationStatus struct { + // BootstrapDataSecretCreated is true when the bootstrap provider reports that the Machine's bootstrap secret is created. + // NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Machine provisioning. + // The value of this field is never updated after provisioning is completed. + // Use conditions to monitor the operational state of the Machine's BootstrapSecret. + // +optional + BootstrapDataSecretCreated bool `json:"bootstrapDataSecretCreated"` +} + // EKSConfigStatus defines the observed state of the Amazon EKS Bootstrap Configuration. type EKSConfigStatus struct { // Ready indicates the BootstrapData secret is ready to be consumed @@ -93,13 +100,12 @@ // +optional DataSecretName *string `json:"dataSecretName,omitempty"` - // FailureReason will be set on non-retryable errors - // +optional - FailureReason string `json:"failureReason,omitempty"` - - // FailureMessage will be set on non-retryable errors + // Initialization provides observations of the Machine initialization process. + // NOTE: Fields in this struct are part of the Cluster API contract and are used to orchestrate initial Machine provisioning. + // The value of those fields is never updated after provisioning is completed. + // Use conditions to monitor the operational state of the Machine. // +optional - FailureMessage string `json:"failureMessage,omitempty"` + Initialization EKSConfigInitializationStatus `json:"initialization,omitempty,omitzero"` // ObservedGeneration is the latest generation observed by the controller. // +optional @@ -107,7 +113,7 @@ // Conditions defines current service state of the EKSConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // Encoding specifies the cloud-init file encoding. @@ -324,12 +330,12 @@ type EKSConfig struct { } // GetConditions returns the observations of the operational state of the EKSConfig resource. -func (r *EKSConfig) GetConditions() clusterv1.Conditions { +func (r *EKSConfig) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1.Conditions. +// SetConditions sets the underlying service state of the EKSConfig to the predescribed []metav1.Condition.
+func (r *EKSConfig) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index 7b059799a7..6f480fb9f4 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -21,8 +21,8 @@ limitations under the License. package v1beta2 import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -81,6 +81,21 @@ func (in *EKSConfig) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EKSConfigInitializationStatus) DeepCopyInto(out *EKSConfigInitializationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSConfigInitializationStatus. +func (in *EKSConfigInitializationStatus) DeepCopy() *EKSConfigInitializationStatus { + if in == nil { + return nil + } + out := new(EKSConfigInitializationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EKSConfigList) DeepCopyInto(out *EKSConfigList) { *out = *in @@ -228,9 +243,10 @@ func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) { *out = new(string) **out = **in } + out.Initialization = in.Initialization if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index ca55199a6b..b0cebce6e9 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -20,6 +20,7 @@ package controllers import ( "bytes" "context" + "fmt" "time" "github.com/pkg/errors" @@ -30,6 +31,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" + clusterapiv1beta1util "sigs.k8s.io/cluster-api-provider-aws/v2/util/clusterapiv1beta1" + "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -41,11 +44,9 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -100,7 +101,7 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) if err != nil { - if errors.Is(err, util.ErrNoCluster) { + if errors.Is(err, clusterapiv1beta1util.ErrNoCluster) { log.Info("EKSConfig 
does not belong to a cluster yet, re-queuing until it's part of a cluster")
 			return ctrl.Result{RequeueAfter: time.Minute}, nil
 		}
@@ -124,12 +125,22 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (

 	// set up defer block for updating config
 	defer func() {
-		conditions.SetSummary(config,
-			conditions.WithConditions(
-				eksbootstrapv1.DataSecretAvailableCondition,
-			),
-			conditions.WithStepCounter(),
-		)
+		forConditionTypes := conditions.ForConditionTypes{
+			eksbootstrapv1.DataSecretAvailableCondition,
+		}
+
+		summaryOpts := []conditions.SummaryOption{
+			forConditionTypes,
+		}
+
+		readyCondition, err := conditions.NewSummaryCondition(config, clusterv1.ReadyCondition, summaryOpts...)
+		if err != nil {
+			readyCondition = &metav1.Condition{
+				Type:   clusterv1.ReadyCondition,
+				Status: metav1.ConditionFalse,
+			}
+		}
+		conditions.Set(config, *readyCondition)

 		patchOpts := []patch.Option{}
 		if rerr == nil {
@@ -202,27 +213,33 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
 		}
 	}

-	if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" {
+	if cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" {
 		return errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider")
 	}

-	if !cluster.Status.InfrastructureReady {
+	if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) { // *bool in v1beta2; nil until reported
 		log.Info("Cluster infrastructure is not ready")
-		conditions.MarkFalse(config,
-			eksbootstrapv1.DataSecretAvailableCondition,
-			eksbootstrapv1.WaitingForClusterInfrastructureReason,
-			clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(config, metav1.Condition{
+			Type:   eksbootstrapv1.DataSecretAvailableCondition,
+			Status: metav1.ConditionFalse,
+			Reason: eksbootstrapv1.WaitingForClusterInfrastructureReason,
+		})
 		return nil
 	}

-	if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
+	if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneAvailableCondition) {
 		log.Info("Control Plane has not yet been initialized")
-		conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(config, metav1.Condition{
+			Type:   eksbootstrapv1.DataSecretAvailableCondition,
+			Status: metav1.ConditionFalse,
+			Reason: eksbootstrapv1.WaitingForControlPlaneInitializationReason,
+		})
 		return nil
 	}

 	controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
-	if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil {
+	// ContractVersionedObjectReference carries no namespace; the contract requires the referenced control plane to live in the Cluster's namespace.
+	if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Namespace}, controlPlane); err != nil {
 		return err
 	}

@@ -230,7 +247,12 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
 	files, err := r.resolveFiles(ctx, config)
 	if err != nil {
 		log.Info("Failed to resolve files for user data")
-		conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
+		conditions.Set(config, metav1.Condition{
+			Type:    eksbootstrapv1.DataSecretAvailableCondition,
+			Status:  metav1.ConditionFalse,
+			Reason:  eksbootstrapv1.DataSecretGenerationFailedReason,
+			Message: fmt.Sprintf("%s", err),
+		})
 		return err
 	}

@@ -275,14 +297,22
@@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 userDataScript, err := userdata.NewNode(nodeInput) if err != nil { log.Error(err, "Failed to create a worker join configuration") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + conditions.Set(config, metav1.Condition{ + Type: eksbootstrapv1.DataSecretAvailableCondition, + Status: metav1.ConditionFalse, + Reason: eksbootstrapv1.DataSecretGenerationFailedReason, + }) return err } // store userdata as secret if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil { log.Error(err, "Failed to store bootstrap data") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + conditions.Set(config, metav1.Condition{ + Type: eksbootstrapv1.DataSecretAvailableCondition, + Status: metav1.ConditionFalse, + Reason: eksbootstrapv1.DataSecretGenerationFailedReason, + }) return err } @@ -301,7 +331,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man if feature.Gates.Enabled(feature.MachinePool) { b = b.Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), ) } @@ -314,7 +344,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc((r.ClusterToEKSConfigs)), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())), ) if err != nil { return errors.Wrap(err, "failed adding watch for Clusters to controller manager") @@ -357,7 +387,11 @@ func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *c config.Status.DataSecretName = ptr.To[string](secret.Name) config.Status.Ready = true - conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition) + + conditions.Set(config, metav1.Condition{ + Type: eksbootstrapv1.DataSecretAvailableCondition, + Status: metav1.ConditionTrue, + }) return nil } @@ -370,7 +404,7 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o cli if !ok { klog.Errorf("Expected a Machine but got a %T", o) } - if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig") { + if m.Spec.Bootstrap.ConfigRef.IsDefined() && m.Spec.Bootstrap.ConfigRef.GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } @@ -382,12 +416,12 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o cli func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { result := []ctrl.Request{} - m, ok := o.(*expclusterv1.MachinePool) + m, ok := o.(*clusterv1.MachinePool) if !ok { klog.Errorf("Expected a MachinePool but got a %T", o) } configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef - if configRef != nil && configRef.GroupVersionKind().GroupKind() == 
eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() {
+	if configRef.IsDefined() && configRef.GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() {
 		name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name}
 		result = append(result, ctrl.Request{NamespacedName: name})
 	}
@@ -418,8 +452,8 @@ func (r *EKSConfigReconciler) ClusterToEKSConfigs(_ context.Context, o client.Ob
 	}

 	for _, m := range machineList.Items {
-		if m.Spec.Bootstrap.ConfigRef != nil &&
-			m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() {
+		if m.Spec.Bootstrap.ConfigRef.IsDefined() &&
+			m.Spec.Bootstrap.ConfigRef.GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() {
 			name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
 			result = append(result, ctrl.Request{NamespacedName: name})
 		}
diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
index 163b94a338..2790a97264 100644
--- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
+++ b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
@@ -24,14 +24,14 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/yaml"

 	eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/conditions"
 )
@@ -85,7 +85,7 @@ func TestEKSConfigReconciler(t *testing.T) {
 			config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
 				{
 					Kind:       "MachinePool",
-					APIVersion: v1beta1.GroupVersion.String(),
+					APIVersion: clusterv1.GroupVersion.String(),
 					Name:       mp.Name,
 					UID:        types.UID(fmt.Sprintf("%s uid", mp.Name)),
 				},
@@ -284,17 +284,21 @@ func newCluster(name string) *clusterv1.Cluster {
 			Name: name,
 		},
 		Spec: clusterv1.ClusterSpec{
-			ControlPlaneRef: &corev1.ObjectReference{
-				Name:      name,
-				Kind:      "AWSManagedControlPlane",
-				Namespace: "default",
+			ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
+				Name: name,
+				Kind: "AWSManagedControlPlane",
 			},
 		},
 		Status: clusterv1.ClusterStatus{
-			InfrastructureReady: true,
+			Initialization: clusterv1.ClusterInitializationStatus{
+				InfrastructureProvisioned: ptr.To[bool](true),
+			},
 		},
 	}
-	conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition)
+	conditions.Set(cluster, metav1.Condition{
+		Type:   clusterv1.ClusterControlPlaneAvailableCondition,
+		Status: metav1.ConditionTrue,
+	})
 	return cluster
 }
@@ -317,9 +321,9 @@ func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine {
 		},
 		Spec: clusterv1.MachineSpec{
 			Bootstrap: clusterv1.Bootstrap{
-				ConfigRef: &corev1.ObjectReference{
-					Kind:       "EKSConfig",
-					APIVersion: eksbootstrapv1.GroupVersion.String(),
+				ConfigRef: clusterv1.ContractVersionedObjectReference{
+					Kind:     "EKSConfig",
+					APIGroup: eksbootstrapv1.GroupVersion.Group,
 				},
 			},
 		},
@@ -334,24 +338,24 @@
 }

 // newMachinePool returns a
CAPI machine object; if cluster is not nil, the MachinePool is linked to the cluster as well.
-func newMachinePool(cluster *clusterv1.Cluster, name string) *v1beta1.MachinePool {
+func newMachinePool(cluster *clusterv1.Cluster, name string) *clusterv1.MachinePool {
 	generatedName := fmt.Sprintf("%s-%s", name, util.RandomString(5))
-	mp := &v1beta1.MachinePool{
+	mp := &clusterv1.MachinePool{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "MachinePool",
-			APIVersion: v1beta1.GroupVersion.String(),
+			APIVersion: clusterv1.GroupVersion.String(),
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
 			Name:      generatedName,
 		},
-		Spec: v1beta1.MachinePoolSpec{
+		Spec: clusterv1.MachinePoolSpec{
 			Template: clusterv1.MachineTemplateSpec{
 				Spec: clusterv1.MachineSpec{
 					Bootstrap: clusterv1.Bootstrap{
-						ConfigRef: &corev1.ObjectReference{
-							Kind:       "EKSConfig",
-							APIVersion: eksbootstrapv1.GroupVersion.String(),
+						ConfigRef: clusterv1.ContractVersionedObjectReference{
+							Kind:     "EKSConfig",
+							APIGroup: eksbootstrapv1.GroupVersion.Group,
 						},
 					},
 				},
@@ -397,7 +401,6 @@ func newEKSConfig(machine *clusterv1.Machine) *eksbootstrapv1.EKSConfig {
 		}
 		config.Status.DataSecretName = &machine.Name
 		machine.Spec.Bootstrap.ConfigRef.Name = config.Name
-		machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace
 	}
 	return config
 }
diff --git a/bootstrap/eks/controllers/eksconfig_controller_test.go b/bootstrap/eks/controllers/eksconfig_controller_test.go
index bb82d14124..c32d9a6d6d 100644
--- a/bootstrap/eks/controllers/eksconfig_controller_test.go
+++ b/bootstrap/eks/controllers/eksconfig_controller_test.go
@@ -22,8 +22,9 @@ import (

 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/utils/ptr"

-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
 )

@@ -35,7 +36,9 @@ func TestEKSConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T) {
 	config := newEKSConfig(machine)

 	cluster.Status = clusterv1.ClusterStatus{
-		InfrastructureReady: false,
+		Initialization: clusterv1.ClusterInitializationStatus{
+			InfrastructureProvisioned: ptr.To[bool](false),
+		},
 	}

 	reconciler := EKSConfigReconciler{
@@ -56,7 +59,9 @@ func TestEKSConfigReconcilerReturnEarlyIfClusterControlPlaneNotInitialized(t *te
 	config := newEKSConfig(machine)

 	cluster.Status = clusterv1.ClusterStatus{
-		InfrastructureReady: true,
+		Initialization: clusterv1.ClusterInitializationStatus{
+			InfrastructureProvisioned: ptr.To[bool](true),
+		},
 	}

 	reconciler := EKSConfigReconciler{
diff --git a/cmd/clusterawsadm/gc/gc.go b/cmd/clusterawsadm/gc/gc.go
index dac5a1f004..ab44eee895 100644
--- a/cmd/clusterawsadm/gc/gc.go
+++ b/cmd/clusterawsadm/gc/gc.go
@@ -32,7 +32,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
diff --git a/cmd/clusterawsadm/gc/gc_test.go b/cmd/clusterawsadm/gc/gc_test.go
index 8e890579aa..16b45f942b 100644
--- a/cmd/clusterawsadm/gc/gc_test.go
+++ b/cmd/clusterawsadm/gc/gc_test.go
@@ -22,7 +22,6 @@ import (
 	"testing"

 	.
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,7 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" ) @@ -105,7 +104,7 @@ func TestEnableGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -176,7 +175,7 @@ func TestDisableGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -271,7 +270,7 @@ func TestConfigureGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -304,11 +303,10 @@ func newManagedCluster(name string, excludeInfra bool) []client.Object { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: name, - Namespace: "default", - Kind: "AWSManagedControlPlane", - APIVersion: ekscontrolplanev1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSManagedControlPlane", + Name: name, + APIGroup: ekscontrolplanev1.GroupVersion.Group, }, }, }, @@ -351,11 +349,10 @@ func newUnManagedCluster(name string, excludeInfra bool) []client.Object { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: name, - Namespace: "default", - Kind: "AWSCluster", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSCluster", + APIGroup: infrav1.GroupVersion.Group, }, }, }, diff --git a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml index 7c6b948992..619e283704 100644 --- a/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml +++ b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml @@ -515,51 +515,56 @@ spec: conditions: description: Conditions defines current service state of the EKSConfig. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable.
                  format: date-time
                  type: string
                message:
                  description: |-
                    message is a human readable message indicating details about the transition.
-                   This field may be empty.
-                 maxLength: 10240
-                 minLength: 1
+                   This may be an empty string.
+                 maxLength: 32768
                  type: string
+               observedGeneration:
+                 description: |-
+                   observedGeneration represents the .metadata.generation that the condition was set based upon.
+                   For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                   with respect to the current state of the instance.
+                 format: int64
+                 minimum: 0
+                 type: integer
                reason:
                  description: |-
-                   reason is the reason for the condition's last transition in CamelCase.
-                   The specific API may choose whether or not this field is considered a guaranteed API.
-                   This field may be empty.
-                 maxLength: 256
+                   reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                   Producers of specific condition types may define expected values and meanings for this field,
+                   and whether the values are considered a guaranteed API.
+                   The value should be a CamelCase string.
+                   This field may not be empty.
+                 maxLength: 1024
                  minLength: 1
-                 type: string
-               severity:
-                 description: |-
-                   severity provides an explicit classification of Reason code, so the users or machines can immediately
-                   understand the current situation and act accordingly.
-                   The Severity field MUST be set only when Status=False.
-                 maxLength: 32
+                 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
                  type: string
                status:
                  description: status of the condition, one of True, False, Unknown.
+                 enum:
+                 - "True"
+                 - "False"
+                 - Unknown
                  type: string
                type:
-                 description: |-
-                   type of condition in CamelCase or in foo.example.com/CamelCase.
-                   Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
-                   can be useful (see .node.status.conditions), the ability to deconflict is important.
-                 maxLength: 256
-                 minLength: 1
+                 description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                 maxLength: 316
+                 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                  type: string
              required:
              - lastTransitionTime
+             - message
+             - reason
              - status
              - type
              type: object
@@ -568,12 +573,21 @@ spec:
            description: DataSecretName is the name of the secret that stores
              the bootstrap data script.
            type: string
-         failureMessage:
-           description: FailureMessage will be set on non-retryable errors
-           type: string
-         failureReason:
-           description: FailureReason will be set on non-retryable errors
-           type: string
+         initialization:
+           description: |-
+             Initialization provides observations of the Machine initialization process.
+             NOTE: Fields in this struct are part of the Cluster API contract and are used to orchestrate initial Machine provisioning.
+             The value of those fields is never updated after provisioning is completed.
+             Use conditions to monitor the operational state of the Machine.
+           properties:
+             bootstrapDataSecretCreated:
+               description: |-
+                 BootstrapDataSecretCreated is true when the bootstrap provider reports that the Machine's bootstrap secret is created.
+                 NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Machine provisioning.
+                 The value of this field is never updated after provisioning is completed.
+ Use conditions to monitor the operational state of the Machine's BootstrapSecret. + type: boolean + type: object observedGeneration: description: ObservedGeneration is the latest generation observed by the controller. diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index 937de1cc32..0c00177db6 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -154,18 +154,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object disableVPCCNI: default: false @@ -1555,54 +1556,57 @@ spec: - id type: object conditions: - description: Conditions specifies the cpnditions for the managed control - plane items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. 
+                 enum:
+                 - "True"
+                 - "False"
+                 - Unknown
                  type: string
                type:
-                 description: |-
-                   type of condition in CamelCase or in foo.example.com/CamelCase.
-                   Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
-                   can be useful (see .node.status.conditions), the ability to deconflict is important.
-                 maxLength: 256
-                 minLength: 1
+                 description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                 maxLength: 316
+                 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                  type: string
              required:
              - lastTransitionTime
+             - message
+             - reason
              - status
              - type
              type: object
@@ -1614,9 +1618,11 @@ spec:
            is managed by an external service such as AKS, EKS, GKE, etc.
            type: boolean
          failureDomains:
-           additionalProperties:
+           description: FailureDomains specifies a list of available availability
+             zones that can be used
+           items:
              description: |-
-               FailureDomainSpec is the Schema for Cluster API failure domains.
+               FailureDomain is the Schema for Cluster API failure domains.
                It allows controllers to understand how many failure domains a cluster can optionally span across.
              properties:
                attributes:
@@ -1629,10 +1635,15 @@ spec:
                description: controlPlane determines if this failure domain is
                  suitable for use by control plane machines.
                type: boolean
+             name:
+               description: name is the name of the failure domain.
+               maxLength: 256
+               minLength: 1
+               type: string
+           required:
+           - name
            type: object
-           description: FailureDomains specifies a list fo available availability
-             zones that can be used
-           type: object
+           type: array
          failureMessage:
            description: |-
              ErrorMessage indicates that there is a terminal problem reconciling the
              state, and will be set to a descriptive error message.
            type: string
@@ -2365,18 +2376,19 @@ spec:
              controlPlaneEndpoint:
                description: ControlPlaneEndpoint represents the endpoint used to
                  communicate with the control plane.
+               minProperties: 1
                properties:
                  host:
                    description: host is the hostname on which the API server is
                      serving.
                    maxLength: 512
+                   minLength: 1
                    type: string
                  port:
                    description: port is the port on which the API server is serving.
                    format: int32
+                   maximum: 65535
+                   minimum: 1
                    type: integer
-               required:
-               - host
-               - port
                type: object
              eksClusterName:
                description: |-
@@ -3799,58 +3811,67 @@ spec:
                - id
                type: object
              conditions:
-               description: Conditions specifies the cpnditions for the managed control
+               description: Conditions specifies the conditions for the managed control
                  plane
                items:
-                 description: Condition defines an observation of a Cluster API resource
-                   operational state.
+                 description: Condition contains details for one aspect of the current
+                   state of this API Resource.
                  properties:
                    lastTransitionTime:
                      description: |-
                        lastTransitionTime is the last time the condition transitioned from one status to another.
-                       This should be when the underlying condition changed. If that is not known, then using the time when
-                       the API field changed is acceptable.
+                       This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
                      format: date-time
                      type: string
                    message:
                      description: |-
                        message is a human readable message indicating details about the transition.
-                       This field may be empty.
-                     maxLength: 10240
-                     minLength: 1
+                       This may be an empty string.
+                     maxLength: 32768
                      type: string
+                   observedGeneration:
+                     description: |-
+                       observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object + maxItems: 32 type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map externalManagedControlPlane: default: true description: |- @@ -3858,9 +3879,11 @@ spec: is managed by an external service such as AKS, EKS, GKE, etc. type: boolean failureDomains: - additionalProperties: + description: FailureDomains specifies a list of available availability + zones that can be used + items: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -3873,15 +3896,15 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object - description: FailureDomains specifies a list fo available availability - zones that can be used - type: object - failureMessage: - description: |- - ErrorMessage indicates that there is a terminal problem reconciling the - state, and will be set to a descriptive error message. 
- type: string + type: array identityProviderStatus: description: |- IdentityProviderStatus holds the status for @@ -3895,11 +3918,20 @@ spec: provider type: string type: object - initialized: + initialization: description: |- - Initialized denotes whether or not the control plane has the + Initialization denotes whether or not the control plane has the uploaded kubernetes config-map. - type: boolean + minProperties: 1 + properties: + infrastructureProvisioned: + description: |- + InfrastructureProvisioned is true when the infrastructure provider reports that the Machine's infrastructure is fully provisioned. + NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Machine provisioning. + The value of this field is never updated after provisioning is completed. + Use conditions to monitor the operational state of the Machine's infrastructure. + type: boolean + type: object networkStatus: description: Networks holds details about the AWS networking resources used by the control plane diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml index 7a0abb3cf8..b84761dacb 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml @@ -150,20 +150,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object eksClusterName: description: |- diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml index e0c2f57080..8736a47e95 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml @@ -164,18 +164,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object credentialsSecretRef: description: |- @@ -835,51 +836,56 @@ spec: description: Conditions specifies the conditions for the managed control plane items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object @@ -893,39 +899,30 @@ spec: ExternalManagedControlPlane indicates to cluster-api that the control plane is managed by an external service such as AKS, EKS, GKE, etc. type: boolean - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the state and will be set to a descriptive error message. - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the spec or the configuration of - the controller, and that manual intervention is required. - type: string id: description: ID is the cluster ID given by ROSA. type: string - initialized: + initialization: description: |- Initialized denotes whether or not the control plane has the uploaded kubernetes config-map. 
- type: boolean + minProperties: 1 + properties: + controlPlaneInitialized: + description: |- + controlPlaneInitialized is true when the control plane provider reports that the Kubernetes control plane is initialized; + usually a control plane is considered initialized when it can accept requests, no matter if this happens before + the control plane is fully provisioned or not. + NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Cluster provisioning. + type: boolean + type: object oidcEndpointURL: description: OIDCEndpointURL is the endpoint url for the managed OIDC provider. type: string - ready: - default: false - description: Ready denotes that the ROSAControlPlane API Server is - ready to receive requests. - type: boolean version: description: OpenShift semantic version, for example "4.14.5". type: string - required: - - ready type: object type: object served: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 83416aa9ae..fe4c6d1ef5 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -109,18 +109,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration for @@ -613,62 +614,65 @@ spec: - id type: object conditions: - description: Conditions provide observations of the operational state - of a Cluster API resource. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. 
- maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object type: array failureDomains: - additionalProperties: + items: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -681,9 +685,15 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object - description: FailureDomains is a slice of FailureDomains. - type: object + type: array networkStatus: description: NetworkStatus encapsulates AWS networking resources. properties: @@ -975,18 +985,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration for @@ -2533,62 +2544,65 @@ spec: - id type: object conditions: - description: Conditions provide observations of the operational state - of a Cluster API resource. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object type: array failureDomains: - additionalProperties: + items: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -2601,9 +2615,15 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object - description: FailureDomains is a slice of FailureDomains. - type: object + type: array networkStatus: description: NetworkStatus encapsulates AWS networking resources. 
properties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index e4a0a6cf58..d1b4ec1381 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -124,20 +124,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration @@ -553,20 +554,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml index d50e7b31c8..16aeef6a4c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml @@ -329,93 +329,60 @@ spec: conditions: description: Conditions defines current state of the Fargate profile. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object type: array - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the FargateProfile and will contain a more verbose string suitable - for logging and human consumption. - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the FargateProfile's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of - FargateProfiles can be added as events to the FargateProfile - object and/or logged in the controller's output. - type: string - failureReason: - description: |- - FailureReason will be set in the event that there is a terminal problem - reconciling the FargateProfile and will contain a succinct value suitable - for machine interpretation. - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the FargateProfile's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of - FargateProfiles can be added as events to the FargateProfile object - and/or logged in the controller's output. - type: string ready: default: false description: Ready denotes that the FargateProfile is available. 
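The change repeated across every controller and CRD in this patch is the same one: the v1beta1 helpers (conditions.MarkTrue/MarkFalse with a severity argument) give way to conditions.Set with a full metav1.Condition, and the regenerated schemas above now list reason (minLength: 1) and message among each condition's required properties. A minimal sketch of that pattern, outside the patch itself; the helper name markDataSecretAvailable is illustrative only, while the condition type and the conditions package are the ones this patch already uses:

// Package example sketches the v1beta2 condition-setting pattern used in
// bootstrap/eks/controllers/eksconfig_controller.go. Not part of the patch.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// markDataSecretAvailable is a hypothetical helper funnelling every
// DataSecretAvailable update through one call site.
func markDataSecretAvailable(config *eksbootstrapv1.EKSConfig, available bool, reason, message string) {
	status := metav1.ConditionFalse
	if available {
		status = metav1.ConditionTrue
	}
	conditions.Set(config, metav1.Condition{
		Type:    eksbootstrapv1.DataSecretAvailableCondition, // untyped string constant after this patch
		Status:  status,
		Reason:  reason,  // required by the regenerated CRD schema (minLength: 1)
		Message: message, // a required key in the schema, but may be the empty string
	})
}

With a shape like this, the WaitingForClusterInfrastructure, WaitingForControlPlaneInitialization, and DataSecretGenerationFailed reasons set by joinWorker would all carry a non-empty reason, which the new schema validation demands.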
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml index 7bface8e4d..7cbe6b0e35 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml @@ -1252,100 +1252,95 @@ spec: description: ASGStatus is a status string returned by the autoscaling API. type: string + availableReplicas: + description: The number of available replicas for this MachinePool. + A machine is considered available when Machine's Available condition + is true. + format: int32 + type: integer conditions: description: Conditions defines current service state of the AWSMachinePool. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+                  maxLength: 316
+                  pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
                   type: string
               required:
               - lastTransitionTime
+              - message
+              - reason
               - status
               - type
             type: object
           type: array
-        failureMessage:
-          description: |-
-            FailureMessage will be set in the event that there is a terminal problem
-            reconciling the Machine and will contain a more verbose string suitable
-            for logging and human consumption.
-
-            This field should not be set for transitive errors that a controller
-            faces that are expected to be fixed automatically over
-            time (like service outages), but instead indicate that something is
-            fundamentally wrong with the Machine's spec or the configuration of
-            the controller, and that manual intervention is required. Examples
-            of terminal errors would be invalid combinations of settings in the
-            spec, values that are unsupported by the controller, or the
-            responsible controller itself being critically misconfigured.
-
-            Any transient errors that occur during the reconciliation of Machines
-            can be added as events to the Machine object and/or logged in the
-            controller's output.
-          type: string
-        failureReason:
-          description: |-
-            FailureReason will be set in the event that there is a terminal problem
-            reconciling the Machine and will contain a succinct value suitable
-            for machine interpretation.
-
-            This field should not be set for transitive errors that a controller
-            faces that are expected to be fixed automatically over
-            time (like service outages), but instead indicate that something is
-            fundamentally wrong with the Machine's spec or the configuration of
-            the controller, and that manual intervention is required. Examples
-            of terminal errors would be invalid combinations of settings in the
-            spec, values that are unsupported by the controller, or the
-            responsible controller itself being critically misconfigured.
-
-            Any transient errors that occur during the reconciliation of Machines
-            can be added as events to the Machine object and/or logged in the
-            controller's output.
-          type: string
         infrastructureMachineKind:
           description: InfrastructureMachineKind is the kind of the infrastructure
             resources behind MachinePool Machines.
           type: string
+        initialization:
+          description: |-
+            Initialization provides observations of the MachinePool initialization process.
+            NOTE: Fields in this struct are part of the Cluster API contract and are used to orchestrate initial MachinePool provisioning.
+            The value of those fields is never updated after provisioning is completed.
+            Use conditions to monitor the operational state of the MachinePool.
+          properties:
+            bootstrapDataSecretCreated:
+              description: |-
+                BootstrapDataSecretCreated is true when the bootstrap provider reports that the MachinePool's bootstrap data secret is created.
+                NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial MachinePool provisioning.
+                The value of this field is never updated after provisioning is completed.
+                Use conditions to monitor the operational state of the MachinePool's BootstrapSecret.
+              type: boolean
+            infrastructureProvisioned:
+              description: |-
+                InfrastructureProvisioned is true when the infrastructure provider reports that the MachinePool's infrastructure is fully provisioned.
+                NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial MachinePool provisioning.
+                The value of this field is never updated after provisioning is completed.
+                Use conditions to monitor the operational state of the MachinePool's infrastructure.
+              type: boolean
+          type: object
         instances:
           description: Instances contains the status for each instance in the
             pool
@@ -1369,11 +1364,15 @@ spec:
         launchTemplateVersion:
           description: The version of the launch template
           type: string
-        ready:
-          description: Ready is true when the provider resource is ready.
-          type: boolean
-        replicas:
-          description: Replicas is the most recently observed number of replicas
+        readyReplicas:
+          description: The number of ready replicas for this MachinePool. A
+            machine is considered ready when Machine's Ready condition is true.
+          format: int32
+          type: integer
+        upToDateReplicas:
+          description: The number of up-to-date replicas targeted by this MachinePool.
+            A machine is considered up-to-date when Machine's UpToDate condition
+            is true.
           format: int32
           type: integer
       type: object
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml
index d7aa2cfef6..6cb9a84985 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml
@@ -1218,93 +1218,60 @@ spec:
       conditions:
         description: Conditions defines current service state of the AWSMachine.
         items:
-          description: Condition defines an observation of a Cluster API resource
-            operational state.
+          description: Condition contains details for one aspect of the current
+            state of this API Resource.
           properties:
             lastTransitionTime:
               description: |-
                 lastTransitionTime is the last time the condition transitioned from one status to another.
-                This should be when the underlying condition changed. If that is not known, then using the time when
-                the API field changed is acceptable.
+                This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
               format: date-time
               type: string
             message:
               description: |-
                 message is a human readable message indicating details about the transition.
-                This field may be empty.
-              maxLength: 10240
-              minLength: 1
+                This may be an empty string.
+              maxLength: 32768
               type: string
+            observedGeneration:
+              description: |-
+                observedGeneration represents the .metadata.generation that the condition was set based upon.
+                For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                with respect to the current state of the instance.
+              format: int64
+              minimum: 0
+              type: integer
             reason:
               description: |-
-                reason is the reason for the condition's last transition in CamelCase.
-                The specific API may choose whether or not this field is considered a guaranteed API.
-                This field may be empty.
-              maxLength: 256
+                reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                Producers of specific condition types may define expected values and meanings for this field,
+                and whether the values are considered a guaranteed API.
+                The value should be a CamelCase string.
+                This field may not be empty.
+              maxLength: 1024
               minLength: 1
-              type: string
-            severity:
-              description: |-
-                severity provides an explicit classification of Reason code, so the users or machines can immediately
-                understand the current situation and act accordingly.
-                The Severity field MUST be set only when Status=False.
- maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object type: array - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the Machine and will contain a more verbose string suitable - for logging and human consumption. - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - type: string - failureReason: - description: |- - FailureReason will be set in the event that there is a terminal problem - reconciling the Machine and will contain a succinct value suitable - for machine interpretation. - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the Machine's spec or the configuration of - the controller, and that manual intervention is required. Examples - of terminal errors would be invalid combinations of settings in the - spec, values that are unsupported by the controller, or the - responsible controller itself being critically misconfigured. - - Any transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - type: string instanceState: description: InstanceState is the state of the AWS instance for this machine. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml index ad7df80fa0..0949c4c90b 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml @@ -60,18 +60,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. 
format: int32
+              maximum: 65535
+              minimum: 1
               type: integer
-            required:
-            - host
-            - port
             type: object
         type: object
       status:
@@ -80,59 +81,66 @@ spec:
       conditions:
         description: Conditions defines current service state of the AWSManagedCluster.
         items:
-          description: Condition defines an observation of a Cluster API resource
-            operational state.
+          description: Condition contains details for one aspect of the current
+            state of this API Resource.
           properties:
             lastTransitionTime:
               description: |-
                 lastTransitionTime is the last time the condition transitioned from one status to another.
-                This should be when the underlying condition changed. If that is not known, then using the time when
-                the API field changed is acceptable.
+                This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
               format: date-time
               type: string
             message:
               description: |-
                 message is a human readable message indicating details about the transition.
-                This field may be empty.
-              maxLength: 10240
-              minLength: 1
+                This may be an empty string.
+              maxLength: 32768
               type: string
+            observedGeneration:
+              description: |-
+                observedGeneration represents the .metadata.generation that the condition was set based upon.
+                For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                with respect to the current state of the instance.
+              format: int64
+              minimum: 0
+              type: integer
             reason:
               description: |-
-                reason is the reason for the condition's last transition in CamelCase.
-                The specific API may choose whether or not this field is considered a guaranteed API.
-                This field may be empty.
-              maxLength: 256
+                reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                Producers of specific condition types may define expected values and meanings for this field,
+                and whether the values are considered a guaranteed API.
+                The value should be a CamelCase string.
+                This field may not be empty.
+              maxLength: 1024
               minLength: 1
-              type: string
-            severity:
-              description: |-
-                severity provides an explicit classification of Reason code, so the users or machines can immediately
-                understand the current situation and act accordingly.
-                The Severity field MUST be set only when Status=False.
-              maxLength: 32
+              pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
               type: string
             status:
              description: status of the condition, one of True, False, Unknown.
+              enum:
+              - "True"
+              - "False"
+              - Unknown
               type: string
             type:
-              description: |-
-                type of condition in CamelCase or in foo.example.com/CamelCase.
-                Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
-                can be useful (see .node.status.conditions), the ability to deconflict is important.
-              maxLength: 256
-              minLength: 1
+              description: type of condition in CamelCase or in foo.example.com/CamelCase.
+              maxLength: 316
+              pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
               type: string
           required:
           - lastTransitionTime
+          - message
+          - reason
          - status
          - type
         type: object
        type: array
      failureDomains:
-        additionalProperties:
+        description: FailureDomains specifies a list of available availability
+          zones that can be used
+        items:
           description: |-
-            FailureDomainSpec is the Schema for Cluster API failure domains.
+            FailureDomain is the Schema for Cluster API failure domains.
It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -145,10 +153,15 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object - description: FailureDomains specifies a list fo available availability - zones that can be used - type: object + type: array ready: description: Ready is when the AWSManagedControlPlane has a API server URL. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml index 8b440da8a0..adee7a883c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml @@ -56,20 +56,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object type: object required: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml index 11fdfa422c..b286faa199 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml @@ -1124,51 +1124,56 @@ spec: description: Conditions defines current service state of the managed machine pool items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. 
- maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml index d3e8b80715..0bdd8f8cfe 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml @@ -60,18 +60,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object type: object status: @@ -80,59 +81,66 @@ spec: conditions: description: Conditions defines current service state of the ROSACluster. items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object type: array failureDomains: - additionalProperties: + description: FailureDomains specifies a list of available availability + zones that can be used + items: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -145,10 +153,15 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object - description: FailureDomains specifies a list fo available availability - zones that can be used - type: object + type: array ready: description: Ready is when the ROSAControlPlane has a API server URL. type: boolean diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml index e6a27a9ddf..b7eed3a802 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml @@ -236,66 +236,60 @@ spec: description: Conditions defines current service state of the managed machine pool items: - description: Condition defines an observation of a Cluster API resource - operational state. + description: Condition contains details for one aspect of the current + state of this API Resource. 
properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when - the API field changed is acceptable. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. - This field may be empty. - maxLength: 10240 - minLength: 1 + This may be an empty string. + maxLength: 32768 type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer reason: description: |- - reason is the reason for the condition's last transition in CamelCase. - The specific API may choose whether or not this field is considered a guaranteed API. - This field may be empty. - maxLength: 256 + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 minLength: 1 - type: string - severity: - description: |- - severity provides an explicit classification of Reason code, so the users or machines can immediately - understand the current situation and act accordingly. - The Severity field MUST be set only when Status=False. - maxLength: 32 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability to deconflict is important. - maxLength: 256 - minLength: 1 + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - lastTransitionTime + - message + - reason - status - type type: object type: array - failureMessage: - description: |- - FailureMessage will be set in the event that there is a terminal problem - reconciling the state and will be set to a descriptive error message. - - This field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over - time (like service outages), but instead indicate that something is - fundamentally wrong with the spec or the configuration of - the controller, and that manual intervention is required. - type: string id: description: ID is the ID given by ROSA. 
type: string diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index d0ffbbc462..e62d2d6ef9 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -21,8 +21,10 @@ import ( "fmt" "time" + "github.com/aws/smithy-go/ptr" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" @@ -47,9 +49,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" capiannotations "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -291,17 +292,29 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(ctx context.Context, cluste if err := elbService.ReconcileLoadbalancers(ctx); err != nil { clusterScope.Error(err, "failed to reconcile load balancer") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.LoadBalancerFailedReason, + Message: fmt.Sprintf("%s", err), + }) return nil, err } if awsCluster.Status.Network.APIServerELB.DNSName == "" { - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.WaitForDNSNameReason, + }) clusterScope.Info("Waiting on API server ELB DNS name") return &retryAfterDuration, nil } - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionTrue, + }) awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: awsCluster.Status.Network.APIServerELB.DNSName, @@ -336,12 +349,21 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope if err := sgService.ReconcileSecurityGroups(); err != nil { clusterScope.Error(err, "failed to reconcile security groups") - conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.ClusterSecurityGroupReconciliationFailedReason, + }) return reconcile.Result{}, err } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + conditions.Set(awsCluster, metav1.Condition{ + Type: 
infrav1.BastionHostReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.BastionHostFailedReason, + Message: fmt.Sprintf("%s", err), + }) clusterScope.Error(err, "failed to reconcile bastion host") return reconcile.Result{}, err } @@ -361,10 +383,18 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope } if err := s3Service.ReconcileBucket(ctx); err != nil { - conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.S3BucketReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.S3BucketFailedReason, + Message: fmt.Sprintf("%s", err), + }) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name) } - conditions.MarkTrue(awsCluster, infrav1.S3BucketReadyCondition) + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.S3BucketReadyCondition, + Status: metav1.ConditionTrue, + }) for _, subnet := range clusterScope.Subnets().FilterPrivate() { found := false @@ -375,8 +405,8 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope } } - clusterScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ - ControlPlane: found, + clusterScope.SetFailureDomain(subnet.AvailabilityZone, []clusterv1.FailureDomain{ + {Name: subnet.AvailabilityZone, ControlPlane: ptr.Bool(found)}, }) } @@ -419,18 +449,21 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(_ context.Con } // Make sure the ref is set - if c.Spec.InfrastructureRef == nil { + if !c.Spec.InfrastructureRef.IsDefined() { log.Trace("Cluster does not have an InfrastructureRef, skipping mapping.") return nil } - if c.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSCluster" { + if c.Spec.InfrastructureRef.GroupKind().Kind != "AWSCluster" { log.Trace("Cluster has an InfrastructureRef for a different type, skipping mapping.") return nil } awsCluster := &infrav1.AWSCluster{} - key := types.NamespacedName{Namespace: c.Spec.InfrastructureRef.Namespace, Name: c.Spec.InfrastructureRef.Name} + key := types.NamespacedName{ + Name: c.Spec.InfrastructureRef.Name, + Namespace: c.Namespace, + } if err := r.Get(ctx, key, awsCluster); err != nil { log.Error(err, "Failed to get AWS cluster") @@ -457,21 +490,36 @@ func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterS switch { case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0: clusterScope.Info("AWSCluster control plane endpoint is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.WaitForExternalControlPlaneEndpointReason, + }) return &requeueAfterPeriod case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0: clusterScope.Info("AWSCluster control plane endpoint host is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: 
infrav1.WaitForExternalControlPlaneEndpointReason, + }) return &requeueAfterPeriod case awsCluster.Spec.ControlPlaneEndpoint.Port == 0: clusterScope.Info("AWSCluster control plane endpoint port is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.WaitForExternalControlPlaneEndpointReason, + }) return &requeueAfterPeriod default: - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + conditions.Set(awsCluster, metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go index 64dbd30c44..26c4bca968 100644 --- a/controllers/awscluster_controller_test.go +++ b/controllers/awscluster_controller_test.go @@ -29,7 +29,6 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/gomega" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" @@ -43,7 +42,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -156,7 +155,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(cluster.Spec.ControlPlaneEndpoint.Host).To(BeEmpty()) g.Expect(cluster.Spec.ControlPlaneEndpoint.Port).To(BeZero()) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, + {conditionType: infrav1.LoadBalancerReadyCondition, status: metav1.ConditionFalse, severity: clusterv1.ConditionSeverityWarning, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, }) // Mimicking an external operator patching the cluster with an already provisioned Load Balancer: // this could be done by a human who provisioned a LB, or by a Control Plane provider. 
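The controller migration above is equally mechanical: every v1beta1 conditions.MarkTrue/MarkFalse/MarkUnknown call becomes an explicit conditions.Set(obj, metav1.Condition{...}) literal. Since that literal now repeats at every call site, a thin wrapper keeps the reconcilers readable. The sketch below is illustrative only: the markFalse helper is not part of this patch, and it assumes the Setter interface of the v1beta2 sigs.k8s.io/cluster-api/util/conditions package the diff already uses.

package controllers

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/cluster-api/util/conditions"
)

// markFalse mirrors the conditions.Set literals repeated throughout this
// patch: status False, a mandatory reason, and a message only when there is
// a real error (formatting a nil error would record "%!s(<nil>)").
func markFalse(obj conditions.Setter, conditionType, reason string, err error) {
	cond := metav1.Condition{
		Type:   conditionType,
		Status: metav1.ConditionFalse,
		Reason: reason,
	}
	if err != nil {
		cond.Message = err.Error()
	}
	conditions.Set(obj, cond)
}

A call such as markFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletingReason, nil) would then replace six lines of struct literal at each site.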
@@ -176,11 +175,11 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.BastionHostReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.VpcReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.SubnetsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.LoadBalancerReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, }) }) t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC", func(t *testing.T) { @@ -275,10 +274,10 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.BastionHostReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.VpcReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.SubnetsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, }) }) t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC and a network type load balancer", func(t *testing.T) { @@ -382,10 +381,10 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.BastionHostReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, 
+ {conditionType: infrav1.VpcReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.SubnetsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, }) }) t.Run("Should successfully reconcile AWSCluster creation with managed VPC", func(t *testing.T) { @@ -475,10 +474,10 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-new")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.BastionHostReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.VpcReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1.SubnetsReadyCondition, status: metav1.ConditionTrue, severity: "", reason: ""}, }) // Information should get written back into the `ClusterScope` object. Keeping it up to date means that @@ -651,15 +650,15 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(ctx, cs) g.Expect(err).To(BeNil()) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.SecondaryCidrsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletingReason}, - {infrav1.RouteTablesReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.VpcEndpointsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.NatGatewaysReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.InternetGatewayReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.SubnetsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.VpcReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1.LoadBalancerReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.BastionHostReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.SecondaryCidrsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletingReason}, + {infrav1.RouteTablesReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.VpcEndpointsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.NatGatewaysReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, 
clusterv1.DeletedV1Beta1Reason}, + {infrav1.InternetGatewayReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.SubnetsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.VpcReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, }) }) } diff --git a/controllers/awscluster_controller_unit_test.go b/controllers/awscluster_controller_unit_test.go index ee2d0bb9cf..873c31b34f 100644 --- a/controllers/awscluster_controller_unit_test.go +++ b/controllers/awscluster_controller_unit_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -244,7 +244,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { }) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).To(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionTrue, "", ""}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, metav1.ConditionTrue, "", ""}}) g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer)) }) @@ -320,7 +320,10 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.ClusterSecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.ClusterSecurityGroupReconciliationFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ + { + infrav1.ClusterSecurityGroupsReadyCondition, + metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.ClusterSecurityGroupReconciliationFailedReason}}) }) t.Run("Should fail AWSCluster create with BastionHostReadyCondition status false", func(t *testing.T) { g := NewWithT(t) @@ -343,7 +346,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.BastionHostFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.BastionHostFailedReason}}) }) t.Run("Should fail AWSCluster create with failure in LoadBalancer reconciliation", func(t *testing.T) { g := NewWithT(t) @@ -367,7 +370,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.LoadBalancerFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, metav1.ConditionFalse, 
clusterv1.ConditionSeverityWarning, infrav1.LoadBalancerFailedReason}}) }) t.Run("Should fail AWSCluster create with LoadBalancer reconcile failure with WaitForDNSName condition as false", func(t *testing.T) { g := NewWithT(t) @@ -391,7 +394,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).To(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitForDNSNameReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitForDNSNameReason}}) }) }) }) diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 445bab678c..9f367e1301 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -32,10 +32,12 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" + clusterapiv1beta1util "sigs.k8s.io/cluster-api-provider-aws/v2/util/clusterapiv1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -60,7 +62,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" @@ -294,7 +296,7 @@ func (r *AWSMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma return controller.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(requeueAWSMachinesForUnpausedCluster), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ) } @@ -335,13 +337,22 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope // all the other errors are blocking. // Because we are reconciling all load balancers, attempt to treat the error as a list of errors. 
if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil {
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
+			conditions.Set(machineScope.AWSMachine, metav1.Condition{
+				Type:    infrav1.ELBAttachedCondition,
+				Status:  metav1.ConditionFalse,
+				Reason:  clusterv1.DeletingReason,
+				Message: fmt.Sprintf("failed to reconcile LB attachment: %+v", err),
+			})
 			return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err)
 		}
 	}
 
 	if machineScope.IsControlPlane() {
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(machineScope.AWSMachine, metav1.Condition{
+			Type:   infrav1.ELBAttachedCondition,
+			Status: metav1.ConditionFalse,
+			Reason: clusterv1.DeletingReason,
+		})
 	}
 
 	if feature.Gates.Enabled(feature.EventBridgeInstanceState) {
@@ -366,7 +377,12 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope
 		machineScope.Info("Terminating EC2 instance", "instance-id", instance.ID)
 
 		// Set the InstanceReadyCondition and patch the object before the blocking operation
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(machineScope.AWSMachine, metav1.Condition{
+			Type:   infrav1.InstanceReadyCondition,
+			Status: metav1.ConditionFalse,
+			Reason: clusterv1.DeletingReason,
+		})
+
 		if err := machineScope.PatchObject(); err != nil {
 			machineScope.Error(err, "failed to patch object")
 			return ctrl.Result{}, err
@@ -374,11 +390,20 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope
 		if err := ec2Service.TerminateInstance(instance.ID); err != nil {
 			machineScope.Error(err, "failed to terminate instance")
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
+			conditions.Set(machineScope.AWSMachine, metav1.Condition{
+				Type:    infrav1.InstanceReadyCondition,
+				Status:  metav1.ConditionFalse,
+				Reason:  clusterv1.DeletingReason,
+				Message: err.Error(),
+			})
 			r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err)
 			return ctrl.Result{}, err
 		}
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(machineScope.AWSMachine, metav1.Condition{
+			Type:   infrav1.InstanceReadyCondition,
+			Status: metav1.ConditionFalse,
+			Reason: clusterv1.DeletingReason,
+		})
 
 		// If the AWSMachine specifies NetworkStatus Interfaces, detach the cluster's core Security Groups from them as part of deletion.
if len(machineScope.AWSMachine.Spec.NetworkInterfaces) > 0 {
@@ -394,7 +420,12 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope
"instanceID", instance.ID,
)
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.SecurityGroupsReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: clusterv1.DeletingReason,
+ })
if err := machineScope.PatchObject(); err != nil {
return ctrl.Result{}, err
}
@@ -402,11 +433,21 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope
for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces {
if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil {
machineScope.Error(err, "failed to detach security groups from instance's network interfaces")
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.SecurityGroupsReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: clusterv1.DeletionFailedV1Beta1Reason,
+ Message: fmt.Sprintf("%s", err),
+ })
return ctrl.Result{}, err
}
}
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+ // All detachments succeeded; no error message is attached to the condition.
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.SecurityGroupsReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: clusterv1.DeletedV1Beta1Reason,
+ })
}

// Release an Elastic IP when the machine has public IP Address (EIP) with a cluster-wide config
@@ -476,16 +517,25 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
return ctrl.Result{}, nil
}

- if !machineScope.Cluster.Status.InfrastructureReady {
+ // InfrastructureProvisioned is a *bool in v1beta2; guard against nil before reading it.
+ if !ptr.Deref(machineScope.Cluster.Status.Initialization.InfrastructureProvisioned, false) {
machineScope.Info("Cluster infrastructure is not ready yet")
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.WaitingForClusterInfrastructureReason,
+ })
+
return ctrl.Result{}, nil
}

// Make sure bootstrap data is available and populated.
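+ // Machine pool machines receive their user data through the machine pool's launch template, so only standalone machines wait on the bootstrap secret here.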
if !machineScope.IsMachinePoolMachine() && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
machineScope.Info("Bootstrap data secret reference is not yet available")
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.WaitingForBootstrapDataReason,
+ })
return ctrl.Result{}, nil
}

@@ -495,13 +545,23 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
instance, err := r.findInstance(machineScope, ec2svc)
if err != nil {
machineScope.Error(err, "unable to find instance")
- conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionUnknown,
+ Reason: infrav1.InstanceNotFoundReason,
+ Message: fmt.Sprintf("%s", err),
+ })
return ctrl.Result{}, err
}
if instance == nil && machineScope.IsMachinePoolMachine() {
err = errors.New("no instance found for machine pool")
machineScope.Error(err, "unable to find instance")
- conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionUnknown,
+ Reason: infrav1.InstanceNotFoundReason,
+ Message: fmt.Sprintf("%s", err),
+ })
return ctrl.Result{}, err
}

@@ -518,7 +578,11 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
if instance == nil {
// Avoid a flickering condition between InstanceProvisionStarted and InstanceProvisionFailed if there's a persistent failure with createInstance
if conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) != infrav1.InstanceProvisionFailedReason {
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionStartedReason, clusterv1.ConditionSeverityInfo, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceProvisionStartedReason,
+ })
if patchErr := machineScope.PatchObject(); patchErr != nil {
machineScope.Error(patchErr, "failed to patch conditions")
return ctrl.Result{}, patchErr
@@ -534,7 +598,12 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
instance, err = r.createInstance(ctx, ec2svc, machineScope, clusterScope, objectStoreSvc)
if err != nil {
machineScope.Error(err, "unable to create instance")
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceProvisionFailedReason,
+ Message: fmt.Sprintf("%s", err),
+ })
return ctrl.Result{}, err
}
}
@@ -584,13 +653,26 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
case infrav1.InstanceStatePending:
machineScope.SetNotReady()
shouldRequeue = true
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1.ConditionSeverityWarning, "")
+ // err is nil by the time the state switch runs, so these conditions carry no message.
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceNotReadyReason,
+ })
case infrav1.InstanceStateStopping, infrav1.InstanceStateStopped:
machineScope.SetNotReady()
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1.ConditionSeverityError, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceStoppedReason,
+ })
case infrav1.InstanceStateRunning:
machineScope.SetReady()
- conditions.MarkTrue(machineScope.AWSMachine, infrav1.InstanceReadyCondition)
+ // metav1.Condition requires a non-empty Reason; no dedicated v1beta2 reason constant exists yet, so a literal is used.
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionTrue,
+ Reason: "InstanceReady",
+ })
case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated:
machineScope.SetNotReady()
@@ -599,19 +681,32 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
// and therefore should not be reported as error.
machineScope.Info("EC2 instance of machine pool was terminated", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, infrav1.InstanceTerminatedReason, "EC2 instance termination")
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1.ConditionSeverityInfo, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceTerminatedReason,
+ })
} else {
machineScope.Info("Unexpected EC2 instance termination", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnexpectedTermination", "Unexpected EC2 instance termination")
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1.ConditionSeverityError, "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceTerminatedReason,
+ })
}
default:
machineScope.SetNotReady()
machineScope.Info("EC2 instance state is undefined", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnhandledState", "EC2 instance state is undefined")
- machineScope.SetFailureReason("UpdateError")
- machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is undefined", instance.State))
- conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "", "")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionUnknown,
+ Reason: "UpdateError",
+ Message: fmt.Sprintf("EC2 instance state %q is undefined", instance.State),
+ })
}

// reconcile the deletion of the bootstrap data secret now that we have updated instance state
@@ -624,8 +719,11 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
// For machine pool machines, it is expected that the ASG terminates instances at any time,
// so no error is logged for those.
if instance.State == infrav1.InstanceStateTerminated {
- machineScope.SetFailureReason("UpdateError")
- machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is unexpected", instance.State))
+ // Keep InstanceTerminatedReason so the condition set in the state switch above is not clobbered.
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.InstanceReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.InstanceTerminatedReason,
+ Message: fmt.Sprintf("EC2 instance state %q is unexpected", instance.State),
+ })
}
}

@@ -681,11 +779,20 @@ func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Inte
// Ensure that the security groups are correct.
_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups)
if err != nil {
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.SecurityGroupsReadyCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.SecurityGroupsFailedReason,
+ Message: fmt.Sprintf("%s", err),
+ })
machineScope.Error(err, "unable to ensure security groups")
return err
}
- conditions.MarkTrue(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition)
+
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.SecurityGroupsReadyCondition,
+ Status: metav1.ConditionTrue,
+ Reason: "SecurityGroupsReady",
+ })

err = r.ensureInstanceMetadataOptions(ec2svc, instance, machineScope.AWSMachine)
if err != nil {
@@ -712,7 +819,7 @@ func (r *AWSMachineReconciler) deleteEncryptedBootstrapDataSecret(machineScope *
}

// Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference
- if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() {
+ if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && !machineScope.Machine.Status.NodeRef.IsDefined() && !machineScope.AWSMachineIsDeleted() {
return nil
}
machineScope.Info("Deleting unneeded entry from AWS Secret", "secretPrefix", machineScope.GetSecretPrefix())
@@ -923,7 +1030,7 @@ func (r *AWSMachineReconciler) deleteBootstrapData(ctx context.Context, machineS
func (r *AWSMachineReconciler) deleteIgnitionBootstrapDataFromS3(ctx context.Context, machineScope *scope.MachineScope, objectStoreSvc services.ObjectStoreInterface) error {
// Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference
- if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() {
+ if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && !machineScope.Machine.Status.NodeRef.IsDefined() && !machineScope.AWSMachineIsDeleted() {
return nil
}

@@ -1014,12 +1121,20 @@ func (r *AWSMachineReconciler) registerInstanceToClassicLB(ctx context.Context,
if err := elbsvc.RegisterInstanceWithAPIServerELB(ctx, i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with classic load balancer: %v", i.ID, err)
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.ELBAttachedCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.ELBAttachFailedReason,
+ Message: fmt.Sprintf("%s", err),
+ })
return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID)
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with classic load balancer", i.ID)
- conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition)
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.ELBAttachedCondition,
+ Status: metav1.ConditionTrue,
+ Reason: "ELBAttached",
+ })
return nil
}

@@ -1039,19 +1154,32 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(ctx context.Context, machi
if ptr.Deref(machineScope.GetInstanceState(), infrav1.InstanceStatePending) != infrav1.InstanceStateRunning {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Cannot register control plane instance %q with load balancer: instance is not running", instance.ID)
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityInfo, "instance not running")
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.ELBAttachedCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.ELBAttachFailedReason,
+ Message: "instance not running",
+ })
return elb.NewInstanceNotRunning("instance is not running")
}

if err := elbsvc.RegisterInstanceWithAPIServerLB(ctx, instance, lb); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", instance.ID, err)
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.ELBAttachedCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.ELBAttachFailedReason,
+ Message: fmt.Sprintf("%s", err),
+ })
return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID)
}

r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with load balancer", instance.ID)
- conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition)
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.ELBAttachedCondition,
+ Status: metav1.ConditionTrue,
+ Reason: "ELBAttached",
+ })
return nil
}

@@ -1070,7 +1198,12 @@ func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(ctx context.Conte
if err := elbsvc.DeregisterInstanceFromAPIServerELB(ctx, instance); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err)
- conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+ conditions.Set(machineScope.AWSMachine, metav1.Condition{
+ Type: infrav1.ELBAttachedCondition,
+ Status: metav1.ConditionFalse,
+ Reason: infrav1.ELBDetachFailedReason,
+ Message: fmt.Sprintf("%s", err),
+ })
return errors.Wrapf(err, "could not deregister control plane instance %q
from load balancer", instance.ID) } @@ -1095,7 +1228,12 @@ func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(ctx context.Context, m if err := elbsvc.DeregisterInstanceFromAPIServerLB(ctx, targetGroupArn, i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(machineScope.AWSMachine, metav1.Condition{ + Type: infrav1.ELBAttachedCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.ELBDetachFailedReason, + Message: fmt.Sprintf("%s", err), + }) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID) } } @@ -1122,7 +1260,7 @@ func (r *AWSMachineReconciler) AWSClusterToAWSMachines(log logger.Wrapper) handl return nil } - cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta) + cluster, err := clusterapiv1beta1util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: log.Trace("Cluster for AWSCluster not found, skipping mapping.") @@ -1166,7 +1304,7 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, result := make([]ctrl.Request, 0, len(machineList.Items)) for _, m := range machineList.Items { log.WithValues("machine", klog.KObj(&m)) - if m.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSMachine" { + if m.Spec.InfrastructureRef.GroupKind().Kind != "AWSMachine" { log.Trace("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.") continue } @@ -1174,7 +1312,7 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, log.Trace("Machine has an InfrastructureRef with an empty name, will not add to reconciliation request.") continue } - log.WithValues("awsMachine", klog.KRef(m.Spec.InfrastructureRef.Namespace, m.Spec.InfrastructureRef.Name)) + log.WithValues("awsMachine", klog.KRef(m.Namespace, m.Spec.InfrastructureRef.Name)) log.Trace("Adding AWSMachine to reconciliation request.") result = append(result, ctrl.Request{NamespacedName: client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}}) } @@ -1186,7 +1324,7 @@ func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log *logger. var managedControlPlaneScope *scope.ManagedControlPlaneScope var err error - if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { + if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneName := client.ObjectKey{ Namespace: awsMachine.Namespace, diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index c2165e16ef..75ce0fd9d4 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -26,13 +26,14 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" elb "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + "github.com/aws/smithy-go/ptr" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/utils/ptr" + //"k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -43,7 +44,7 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -140,7 +141,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) ms.Machine.Spec.Bootstrap.DataSecretName = aws.String("bootstrap-data") - ms.Machine.Spec.Version = aws.String("test") + ms.Machine.Spec.Version = "test" ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")} ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""} @@ -167,9 +168,9 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileNormal(ctx, ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.SecurityGroupsReadyCondition, corev1.ConditionTrue, "", ""}, - {infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}, - {infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}, + {infrav1.SecurityGroupsReadyCondition, metav1.ConditionTrue, "", ""}, + {infrav1.InstanceReadyCondition, metav1.ConditionTrue, "", ""}, + {infrav1.ELBAttachedCondition, metav1.ConditionTrue, "", ""}, }) g.Expect(ms.AWSMachine.Finalizers).Should(ContainElement(infrav1.MachineFinalizer)) }) @@ -241,8 +242,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, + {infrav1.ELBAttachedCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) @@ -320,7 +321,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) ms.Machine.Spec.Bootstrap.DataSecretName = aws.String("bootstrap-data") - ms.Machine.Spec.Version = aws.String("test") + ms.Machine.Spec.Version = "test" ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")} ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""} @@ -346,7 +347,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileNormal(ctx, ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) + expectConditions(g, 
ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionTrue, "", ""}}) g.Expect(ms.AWSMachine.Finalizers).Should(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should fail in reconciling control-plane machine deletion while terminating instance ", func(t *testing.T) { @@ -422,8 +423,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}, + {infrav1.ELBAttachedCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}, }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) @@ -438,7 +439,9 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s Name: "test", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.Bool(true), + }, }, }, Machine: &clusterv1.Machine{ @@ -447,7 +450,7 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To[string]("bootstrap-data"), + DataSecretName: ptr.String("bootstrap-data"), }, }, }, @@ -529,7 +532,7 @@ func (p *pointsTo) String() string { type conditionAssertion struct { conditionType clusterv1.ConditionType - status corev1.ConditionStatus + status metav1.ConditionStatus severity clusterv1.ConditionSeverity reason string } @@ -537,11 +540,10 @@ type conditionAssertion struct { func expectConditions(g *WithT, m *infrav1.AWSMachine, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions") for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := conditions.Get(m, string(c.conditionType)) g.Expect(actual).To(Not(BeNil())) g.Expect(actual.Type).To(Equal(c.conditionType)) g.Expect(actual.Status).To(Equal(c.status)) - g.Expect(actual.Severity).To(Equal(c.severity)) g.Expect(actual.Reason).To(Equal(c.reason)) } } diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index e5e9827bdd..2e857397a5 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -38,6 +38,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -52,8 +53,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" 
"sigs.k8s.io/cluster-api/util" ) @@ -115,7 +116,9 @@ func TestAWSMachineReconciler(t *testing.T) { Name: "test", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To[bool](true), + }, }, }, Machine: &clusterv1.Machine{ @@ -155,7 +158,9 @@ func TestAWSMachineReconciler(t *testing.T) { Client: client, Cluster: &clusterv1.Cluster{ Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + ControlPlaneInitialized: ptr.To[bool](true), + }, }, }, Machine: &clusterv1.Machine{ @@ -215,9 +220,13 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) runningInstance(t, g) - er := "CreateError" - ms.AWSMachine.Status.FailureReason = &er - ms.AWSMachine.Status.FailureMessage = ptr.To[string]("Couldn't create machine") + + expectConditions(g, ms.AWSMachine, []conditionAssertion{ + {status: metav1.ConditionFalse}, + }) + //er := "CreateError" + //ms.AWSMachine.Status.FailureReason = &er + //ms.AWSMachine.Status.FailureMessage = ptr.To[string]("Couldn't create machine") buf := new(bytes.Buffer) klog.SetOutput(buf) @@ -232,7 +241,7 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) runningInstance(t, g) - ms.Cluster.Status.InfrastructureReady = false + ms.Cluster.Status.Initialization.ControlPlaneInitialized = ptr.To[bool](false) buf := new(bytes.Buffer) klog.SetOutput(buf) @@ -240,7 +249,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) }) t.Run("should exit immediately if bootstrap data secret reference isn't available", func(t *testing.T) { @@ -258,7 +267,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) }) t.Run("should return an error when we can't list instances by tags", func(t *testing.T) { @@ -393,7 +402,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("should set instance to running", 
@@ -413,7 +422,7 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue())
g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
expectConditions(g, ms.AWSMachine, []conditionAssertion{
- {conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionTrue},
+ {conditionType: infrav1.InstanceReadyCondition, status: metav1.ConditionTrue, reason: "InstanceReady"},
})
})
})
@@ -434,8 +443,10 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
g.Expect(buf.String()).To(ContainSubstring("EC2 instance state is undefined"))
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("InstanceUnhandledState")))
- g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"NewAWSMachineState\" is undefined")))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionUnknown}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{
+ {conditionType: infrav1.InstanceReadyCondition, status: metav1.ConditionUnknown, reason: "UpdateError"},
+ })
})
t.Run("security Groups succeed", func(t *testing.T) {
getCoreSecurityGroups := func(t *testing.T, g *WithT) {
@@ -464,7 +475,7 @@ func TestAWSMachineReconciler(t *testing.T) {
ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return([]string{"sg-2345"}, nil)
_, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.SecurityGroupsReadyCondition, status: corev1.ConditionTrue}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.SecurityGroupsReadyCondition, status: metav1.ConditionTrue, reason: "SecurityGroupsReady"}})
})

t.Run("should not tag instances if there's no tags", func(t *testing.T) {
@@ -574,7 +585,7 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopping)))
g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}})
})

t.Run("should then set instance to stopped and unready", func(t *testing.T) {
@@ -590,7 +601,7 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopped)))
g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed"))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}})
})

t.Run("should then set instance to running and ready once it is restarted", func(t *testing.T) {
@@ -647,8 +658,10 @@
g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse())
g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination"))
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination")))
- g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"terminated\" is unexpected")))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{
+ {infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason},
+ })
})
})
t.Run("should not register if control plane ELB is already registered", func(t *testing.T) {
@@ -674,7 +687,7 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
})
t.Run("should attach control plane ELB to instance", func(t *testing.T) {
g := NewWithT(t)
@@ -700,8 +713,8 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}})
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, metav1.ConditionTrue, "", "ELBAttached"}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
})
t.Run("should store userdata for CloudInit using AWS Secrets Manager only when not skipped", func(t *testing.T) {
g := NewWithT(t)
@@ -721,7 +734,7 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}})
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer))
})
t.Run("should fail to delete bootstrap data secret if AWSMachine state is updated", func(t *testing.T) {
@@ -730,9 +743,8 @@ func TestAWSMachineReconciler(t *testing.T) {
setup(t, g, awsMachine)
defer teardown(t, g)
instanceCreate(t, g)
- ms.Machine.Status.NodeRef = &corev1.ObjectReference{
&corev1.ObjectReference{ - Namespace: "default", - Name: "test", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "test", } secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1) @@ -740,7 +752,7 @@ func TestAWSMachineReconciler(t *testing.T) { secretSvc.EXPECT().Delete(gomock.Any()).Return(errors.New("failed to delete entries from AWS Secret")).Times(1) _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) g.Expect(err).To(MatchError(ContainSubstring("failed to delete entries from AWS Secret"))) }) }) @@ -771,7 +783,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring(expectedError)) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceProvisionFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceProvisionFailedReason}}) }) t.Run("should fail to determine the registration status of control plane ELB", func(t *testing.T) { g := NewWithT(t) @@ -797,7 +809,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring("error describing ELB")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedAttachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("should fail to attach control plane ELB to instance", func(t *testing.T) { g := NewWithT(t) @@ -823,7 +835,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).ToNot(BeNil()) g.Expect(err.Error()).To(ContainSubstring("failed to attach ELB")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedAttachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("should fail to delete bootstrap data secret if AWSMachine is in failed state", func(t *testing.T) { @@ -832,7 +844,12 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) ms.SetSecretPrefix("test") - ms.AWSMachine.Status.FailureReason = aws.String("error in AWSMachine") + + // TODO: Fix fail reason condition testing + //expectConditions(g, 
ms.AWSMachine, []conditionAssertion{ + // {conditionType: metav1.ConditionFalse}, + //}) + //ms.AWSMachine.Status.FailureReason = aws.String("error in AWSMachine") ms.SetSecretCount(0) _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) @@ -862,7 +879,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err.Error()).To(ContainSubstring("json: cannot unmarshal number into Go value of type map[string]interface {}")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("Should fail to update resource tags after instance is created", func(t *testing.T) { g := NewWithT(t) @@ -881,7 +898,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) }) t.Run("While ensuring SecurityGroups", func(t *testing.T) { @@ -912,7 +929,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) }) t.Run("Should fail to fetch core security groups", func(t *testing.T) { g := NewWithT(t) @@ -930,7 +947,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) }) t.Run("Should fail if ensureSecurityGroups fails to fetch additional security groups", func(t *testing.T) { g := NewWithT(t) @@ -960,7 +977,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, 
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}})
})
t.Run("Should fail to update security group", func(t *testing.T) {
g := NewWithT(t)
@@ -991,7 +1008,7 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs)
g.Expect(err).ToNot(BeNil())
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}})
})
})
})
@@ -1071,10 +1088,8 @@ func TestAWSMachineReconciler(t *testing.T) {
ID: "myMachine",
}

- ms.Machine.Status.NodeRef = &corev1.ObjectReference{
- Kind: "Node",
- Name: "myMachine",
- APIVersion: "v1",
+ ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{
+ Name: "myMachine",
}

ms.AWSMachine.Spec.CloudInit = infrav1.CloudInit{
@@ -1133,7 +1148,8 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
setNodeRef(t, g)

- ms.AWSMachine.Status.FailureReason = ptr.To("UpdateError")
+ conditions.Set(ms.AWSMachine, metav1.Condition{Type: infrav1.InstanceReadyCondition, Status: metav1.ConditionFalse, Reason: "UpdateError"})
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs)
@@ -1263,7 +1279,7 @@ func TestAWSMachineReconciler(t *testing.T) {
defer teardown(t, g)
setSSM(t, g)

- ms.AWSMachine.Status.FailureReason = ptr.To("UpdateError")
+ conditions.Set(ms.AWSMachine, metav1.Condition{Type: infrav1.InstanceReadyCondition, Status: metav1.ConditionFalse, Reason: "UpdateError"})
secretSvc.EXPECT().Delete(gomock.Any()).Return(nil).Times(1)
ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs)
@@ -1416,10 +1432,8 @@ func TestAWSMachineReconciler(t *testing.T) {
ID: "myMachine",
}

- ms.Machine.Status.NodeRef = &corev1.ObjectReference{
- Kind: "Node",
- Name: "myMachine",
- APIVersion: "v1",
+ ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{
+ Name: "myMachine",
}

ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes()
@@ -1480,7 +1494,7 @@ func TestAWSMachineReconciler(t *testing.T) {
useIgnitionWithClusterObjectStore(t, g)

// TODO: This seems to have no effect on the test result.
- ms.AWSMachine.Status.FailureReason = ptr.To("UpdateError")
objectStoreSvc.EXPECT().Delete(gomock.Any(), gomock.Any()).Return(nil).Times(1)
ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs)
@@ -1552,7 +1566,7 @@ func TestAWSMachineReconciler(t *testing.T) {
useIgnitionWithClusterObjectStore(t, g)

// TODO: This seems to have no effect on the test result.
- ms.AWSMachine.Status.FailureReason = ptr.To("UpdateError")
objectStoreSvc.EXPECT().Delete(gomock.Any(), gomock.Any()).Return(nil).Times(1)
ec2Svc.EXPECT().TerminateInstance(gomock.Any()).Return(nil).AnyTimes()
_, _ = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs)
@@ -1839,7 +1853,7 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(err.Error()).To(ContainSubstring("error describing ELB"))
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents))
g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedDetachControlPlaneELB")))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}})
})

t.Run("should not do anything if control plane ELB is already detached from instance", func(t *testing.T) {
@@ -1862,7 +1876,7 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}})
})
})
})
@@ -1888,7 +1902,7 @@ func TestAWSMachineReconciler(t *testing.T) {
_, err := reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs)
g.Expect(err).To(BeNil())
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedV1Beta1Reason}})
})
t.Run("should fail to detach control plane ELB from instance", func(t *testing.T) {
g := NewWithT(t)
@@ -1912,7 +1926,7 @@ func TestAWSMachineReconciler(t *testing.T) {
g.Expect(err).ToNot(BeNil())
g.Expect(err.Error()).To(ContainSubstring("Duplicate access point name for load balancer"))
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents))
- expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}})
+ expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}})
})
t.Run("should fail if secretPrefix present, but secretCount is not set", func(t *testing.T) {
g := NewWithT(t)
@@ -1977,10 +1991,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) {
},
Spec: clusterv1.MachineSpec{
ClusterName: "capi-test",
- InfrastructureRef: corev1.ObjectReference{
- Kind: "AWSMachine",
- Name: "aws-machine-6",
- APIVersion: infrav1.GroupVersion.String(),
+ InfrastructureRef: clusterv1.ContractVersionedObjectReference{
+ Kind: "AWSMachine",
+ Name:
"aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2017,10 +2031,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-1", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-1", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2049,10 +2063,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-2", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-2", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2078,10 +2092,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-3", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-3", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2115,10 +2129,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "Machine", - Name: "aws-machine-4", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "Machine", + Name: "aws-machine-4", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2148,9 +2162,9 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2358,7 +2372,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { }, }, ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "foo"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "foo"}, }}, expectError: false, }, @@ -2389,7 +2403,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{Kind: AWSManagedControlPlaneRefKind}, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{Kind: AWSManagedControlPlaneRefKind}, }, }, expectError: false, @@ -2422,7 +2436,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "aws-test-5"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "aws-test-5"}, }, }, expectError: false, @@ -2455,7 +2469,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: 
clusterv1.ClusterSpec{
- InfrastructureRef: &corev1.ObjectReference{Name: "aws-test-5"},
+ InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "aws-test-5"},
},
},
awsCluster: &infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "aws-test-5"}},
@@ -2541,21 +2555,21 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi
ownerCluster := &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: ns},
Spec: clusterv1.ClusterSpec{
- InfrastructureRef: &corev1.ObjectReference{
- Kind: "AWSCluster",
- Name: "capi-test-1", // assuming same name
- Namespace: ns,
- APIVersion: infrav1.GroupVersion.String(),
+ InfrastructureRef: clusterv1.ContractVersionedObjectReference{
+ Kind: "AWSCluster",
+ Name: "capi-test-1", // assuming same name
+ APIGroup: infrav1.GroupVersion.Group,
},
- ControlPlaneRef: &corev1.ObjectReference{
- Kind: "KubeadmControlPlane",
- Namespace: cp.Namespace,
- Name: cp.Name,
- APIVersion: kubeadmv1beta1.GroupVersion.String(),
+ ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
+ Kind: "KubeadmControlPlane",
+ Name: cp.Name,
+ APIGroup: kubeadmv1beta1.GroupVersion.Group,
},
},
Status: clusterv1.ClusterStatus{
- InfrastructureReady: true,
+ Initialization: clusterv1.ClusterInitializationStatus{
+ InfrastructureProvisioned: ptr.To[bool](true),
+ },
},
}

@@ -2647,11 +2661,11 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi
},
},
Status: infrav1.AWSMachineStatus{
- Conditions: clusterv1.Conditions{
+ Conditions: []metav1.Condition{
{
- Type: "Paused",
- Status: corev1.ConditionFalse,
- Reason: "NotPaused",
+ Type: clusterv1.PausedCondition,
+ Status: metav1.ConditionFalse,
+ Reason: clusterv1.NotPausedReason,
},
},
},
diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go
index 560191634b..bf47ad91ec 100644
--- a/controllers/awsmanagedcluster_controller.go
+++ b/controllers/awsmanagedcluster_controller.go
@@ -36,7 +36,7 @@ import (
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
@@ -83,7 +83,7 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
controlPlaneRef := types.NamespacedName{
Name: cluster.Spec.ControlPlaneRef.Name,
- Namespace: cluster.Spec.ControlPlaneRef.Namespace,
+ Namespace: cluster.Namespace,
}

if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil {
@@ -181,7 +181,7 @@ func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ cont
}

managedClusterRef := cluster.Spec.InfrastructureRef
- if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" {
+ if !managedClusterRef.IsDefined() || managedClusterRef.Kind != "AWSManagedCluster" {
log.Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping")
return nil
}
@@ -190,7 +190,7 @@ func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ cont
{
NamespacedName: types.NamespacedName{
Name: managedClusterRef.Name,
- Namespace: managedClusterRef.Namespace,
+ Namespace: cluster.Namespace,
},
},
}
diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go
b/controllers/helpers_test.go index 05f103cfb6..14de53ae18 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -36,7 +36,7 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -127,11 +127,11 @@ var ( func expectAWSClusterConditions(g *WithT, m *infrav1.AWSCluster, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions") for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := conditions.Get(m, string(c.conditionType)) g.Expect(actual).To(Not(BeNil())) - g.Expect(actual.Type).To(Equal(c.conditionType)) + g.Expect(actual.Type).To(Equal(string(c.conditionType))) g.Expect(actual.Status).To(Equal(c.status)) - g.Expect(actual.Severity).To(Equal(c.severity)) + // metav1.Condition has no Severity field, so the severity assertion is dropped in v1beta2. g.Expect(actual.Reason).To(Equal(c.reason)) } } diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go index 8d228ba0f1..66bab5d433 100644 --- a/controllers/rosacluster_controller.go +++ b/controllers/rosacluster_controller.go @@ -22,7 +22,6 @@ import ( cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -31,6 +30,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -48,9 +48,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -111,7 +109,7 @@ func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) controlPlane := &rosacontrolplanev1.ROSAControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { @@ -222,7 +220,7 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log } rosaClusterRef := cluster.Spec.InfrastructureRef - if rosaClusterRef == nil || rosaClusterRef.Kind != "ROSACluster" { + if !rosaClusterRef.IsDefined() || rosaClusterRef.Kind != "ROSACluster" { log.Info("InfrastructureRef is nil or not ROSACluster, skipping mapping") return nil } @@ -231,7 +229,7 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log { NamespacedName: types.NamespacedName{ Name: rosaClusterRef.Name, - Namespace: rosaClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } @@ -262,7 +260,7 @@ func (r *ROSAClusterReconciler) getRosaMachinePoolNames(ctx context.Context, clu } // buildROSAMachinePool returns a ROSAMachinePool and its corresponding MachinePool. 
-func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, clusterName string, namespace string, nodePool *cmv1.NodePool) (*expinfrav1.ROSAMachinePool, *expclusterv1.MachinePool) { +func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, clusterName string, namespace string, nodePool *cmv1.NodePool) (*expinfrav1.ROSAMachinePool, *clusterv1.MachinePool) { rosaMPSpec := utils.NodePoolToRosaMachinePoolSpec(nodePool) rosaMachinePool := &expinfrav1.ROSAMachinePool{ TypeMeta: metav1.TypeMeta{ @@ -278,9 +276,9 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste }, Spec: rosaMPSpec, } - machinePool := &expclusterv1.MachinePool{ + machinePool := &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ @@ -290,7 +288,7 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste clusterv1.ClusterNameLabel: clusterName, }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: clusterName, Replicas: ptr.To(int32(1)), Template: clusterv1.MachineTemplateSpec{ @@ -299,10 +297,10 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To(string("")), }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: expinfrav1.GroupVersion.String(), - Kind: "ROSAMachinePool", - Name: rosaMachinePool.Name, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: expinfrav1.GroupVersion.Group, + Kind: "ROSAMachinePool", + Name: rosaMachinePool.Name, }, }, }, @@ -314,7 +312,7 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste // syncROSAClusterNodePools ensure every NodePool has a MachinePool and create a corresponding MachinePool if it does not exist. func (r *ROSAClusterReconciler) syncROSAClusterNodePools(ctx context.Context, controlPlane *rosacontrolplanev1.ROSAControlPlane, rosaScope *scope.ROSAControlPlaneScope) error { - if controlPlane.Status.Ready { + if controlPlane.Status.Initialization.ControlPlaneInitialized { if r.NewOCMClient == nil { return fmt.Errorf("failed to create OCM client: NewOCMClient is nil") } diff --git a/controllers/rosacluster_controller_test.go b/controllers/rosacluster_controller_test.go index 0a7dd42c0b..5151cd5481 100644 --- a/controllers/rosacluster_controller_test.go +++ b/controllers/rosacluster_controller_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/aws/smithy-go/ptr" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" @@ -43,8 +44,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -136,19 +136,17 @@ func TestRosaClusterReconcile(t *testing.T) { UID: types.UID("capi-cluster-1"), }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: rosaCluster.Name, - Kind: "ROSACluster", - APIVersion: expinfrav1.GroupVersion.String(), - Namespace: ns.Name, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaCluster.Name, + Kind: "ROSACluster", + APIGroup: expinfrav1.GroupVersion.Group, }, - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane.Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), - Namespace: ns.Name, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane.Name, + Kind: "ROSAControlPlane", + APIGroup: expinfrav1.GroupVersion.Group, }, - Paused: false, + Paused: ptr.Bool(false), }, } @@ -169,7 +167,7 @@ func TestRosaClusterReconcile(t *testing.T) { // set controlplane status rosaCPPatch, err := patch.NewHelper(rosaControlPlane, testEnv) - rosaControlPlane.Status.Ready = true + rosaControlPlane.Status.Initialization.ControlPlaneInitialized = true rosaControlPlane.Status.Version = "4.19.20" rosaControlPlane.Status.ID = rosaClusterName g.Expect(rosaCPPatch.Patch(ctx, rosaControlPlane)).To(Succeed()) @@ -177,11 +175,11 @@ func TestRosaClusterReconcile(t *testing.T) { // set rosaCluster pause conditions rosaClsPatch, err := patch.NewHelper(rosaCluster, testEnv) - rosaCluster.Status.Conditions = clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.PausedV1Beta2Condition, - Status: corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, + rosaCluster.Status.Conditions = []metav1.Condition{ + metav1.Condition{ + Type: clusterv1.PausedCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.PausedReason, Message: "", }, } @@ -190,11 +188,11 @@ func TestRosaClusterReconcile(t *testing.T) { // set capiCluster pause condition clsPatch, err := patch.NewHelper(capiCluster, testEnv) - capiCluster.Status.Conditions = clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.PausedV1Beta2Condition, - Status: corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, + capiCluster.Status.Conditions = []metav1.Condition{ + metav1.Condition{ + Type: clusterv1.PausedCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.PausedReason, Message: "", }, } @@ -282,7 +280,7 @@ func TestRosaClusterReconcile(t *testing.T) { errRosaMP := testEnv.Get(ctx, keyRosaMP, rosaMachinePool) g.Expect(errRosaMP).ToNot(HaveOccurred()) - machinePool := &expclusterv1.MachinePool{} + machinePool := &clusterv1.MachinePool{} keyMP := client.ObjectKey{Name: nodePoolName, Namespace: ns.Name} errMP := testEnv.Get(ctx, keyMP, machinePool) g.Expect(errMP).ToNot(HaveOccurred()) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 4ee71f9d07..16bd75006c 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -31,9 +31,8 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 
"sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( @@ -54,7 +53,6 @@ func setup() { utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(corev1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go index a965bef381..60124e12c2 100644 --- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -244,7 +244,7 @@ type AWSManagedControlPlaneStatus struct { Network infrav1.NetworkStatus `json:"networkStatus,omitempty"` // FailureDomains specifies a list fo available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains []clusterv1.FailureDomain `json:"failureDomains,omitempty"` // Bastion holds details of the instance that is used as a bastion jump box // +optional Bastion *infrav1.Instance `json:"bastion,omitempty"` @@ -268,7 +268,8 @@ type AWSManagedControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the cpnditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + + Conditions []metav1.Condition `json:"conditions,omitempty"` // Addons holds the current status of the EKS addons // +optional Addons []AddonState `json:"addons,omitempty"` @@ -308,12 +309,12 @@ type AWSManagedControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *AWSManagedControlPlane) GetConditions() []metav1.Condition { return r.Status.Conditions } // SetConditions sets the status conditions for the AWSManagedControlPlane. -func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *AWSManagedControlPlane) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/controlplane/eks/api/v1beta1/conditions_consts.go b/controlplane/eks/api/v1beta1/conditions_consts.go index 04b7452b19..fa03018838 100644 --- a/controlplane/eks/api/v1beta1/conditions_consts.go +++ b/controlplane/eks/api/v1beta1/conditions_consts.go @@ -16,45 +16,43 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. 
- EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" + EKSControlPlaneReadyCondition = "EKSControlPlaneReady" // EKSControlPlaneCreatingCondition condition reports on whether the eks // control plane is creating. - EKSControlPlaneCreatingCondition clusterv1.ConditionType = "EKSControlPlaneCreating" + EKSControlPlaneCreatingCondition = "EKSControlPlaneCreating" // EKSControlPlaneUpdatingCondition condition reports on whether the eks // control plane is updating. - EKSControlPlaneUpdatingCondition clusterv1.ConditionType = "EKSControlPlaneUpdating" + EKSControlPlaneUpdatingCondition = "EKSControlPlaneUpdating" // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed" ) const ( // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles. - IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady" + IAMControlPlaneRolesReadyCondition = "IAMControlPlaneRolesReady" // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles. IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed" ) const ( // IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config. - IAMAuthenticatorConfiguredCondition clusterv1.ConditionType = "IAMAuthenticatorConfigured" + IAMAuthenticatorConfiguredCondition = "IAMAuthenticatorConfigured" // IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config. IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed" ) const ( // EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons. - EKSAddonsConfiguredCondition clusterv1.ConditionType = "EKSAddonsConfigured" + EKSAddonsConfiguredCondition = "EKSAddonsConfigured" // EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons. EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed" ) const ( // EKSIdentityProviderConfiguredCondition condition reports on the successful association of identity provider config. - EKSIdentityProviderConfiguredCondition clusterv1.ConditionType = "EKSIdentityProviderConfigured" + EKSIdentityProviderConfiguredCondition = "EKSIdentityProviderConfigured" // EKSIdentityProviderConfiguredFailedReason used to report failures while reconciling the identity provider config association. 
EKSIdentityProviderConfiguredFailedReason = "EKSIdentityProviderConfiguredFailed" ) diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index 9fe8517b2f..7c46f0eaa1 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -24,12 +24,13 @@ package v1beta1 import ( unsafe "unsafe" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func init() { @@ -388,16 +389,16 @@ func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error { out.Network = in.Network - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*[]corev1beta2.FailureDomain)(unsafe.Pointer(&in.FailureDomains)) out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion)) if err := Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil { return err } out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane)) - out.Initialized = in.Initialized + // WARNING: in.Initialized requires manual conversion: does not exist in peer-type out.Ready = in.Ready - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Addons = *(*[]v1beta2.AddonState)(unsafe.Pointer(&in.Addons)) if err := Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil { return err @@ -405,23 +406,17 @@ func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedContr return nil } -// Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus is an autogenerated conversion function. 
-func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error { - return autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in, out, s) -} - func autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *v1beta2.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error { out.Network = in.Network - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*[]corev1beta2.FailureDomain)(unsafe.Pointer(&in.FailureDomains)) out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion)) if err := Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil { return err } out.ExternalManagedControlPlane = (*bool)(unsafe.Pointer(in.ExternalManagedControlPlane)) - out.Initialized = in.Initialized + // WARNING: in.Initialization requires manual conversion: does not exist in peer-type out.Ready = in.Ready - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions)) out.Addons = *(*[]AddonState)(unsafe.Pointer(&in.Addons)) if err := Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil { return err @@ -789,7 +784,7 @@ func Convert_v1beta2_UserMapping_To_v1beta1_UserMapping(in *v1beta2.UserMapping, } func autoConvert_v1beta1_VpcCni_To_v1beta2_VpcCni(in *VpcCni, out *v1beta2.VpcCni, s conversion.Scope) error { - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.Env = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.Env)) return nil } @@ -800,6 +795,6 @@ func Convert_v1beta1_VpcCni_To_v1beta2_VpcCni(in *VpcCni, out *v1beta2.VpcCni, s func autoConvert_v1beta2_VpcCni_To_v1beta1_VpcCni(in *v1beta2.VpcCni, out *VpcCni, s conversion.Scope) error { // WARNING: in.Disable requires manual conversion: does not exist in peer-type - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.Env = *(*[]corev1.EnvVar)(unsafe.Pointer(&in.Env)) return nil } diff --git a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go index f6db3b2da0..116b31ac3d 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go @@ -22,9 +22,10 @@ package v1beta1 import ( "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
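Each "WARNING: ... requires manual conversion: does not exist in peer-type" above means conversion-gen stopped emitting the public Convert_... wrapper for that type, so a hand-written wrapper has to be provided in the package's conversion code. A minimal sketch of what the up-conversion could look like, assuming v1beta1 Initialized folds into the new Initialization struct and FailureMessage is simply dropped (the real mapping is this patch's follow-up work, not shown here):

    // Hand-written wrapper for the fields conversion-gen can no longer pair up (a sketch).
    func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error {
    	if err := autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in, out, s); err != nil {
    		return err
    	}
    	// v1beta1 Initialized has no direct peer; it maps onto the Initialization struct.
    	out.Initialization.InfrastructureProvisioned = in.Initialized
    	// v1beta1 FailureMessage has no v1beta2 peer; terminal failures surface as conditions instead.
    	return nil
    }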
@@ -190,9 +191,9 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]corev1beta2.FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Bastion != nil { @@ -213,7 +214,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index a36b35dda3..a53aba45ef 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -273,14 +273,25 @@ type IdentityProviderStatus struct { Status string `json:"status,omitempty"` } +// AWSManagedControlPlaneInitializationStatus provides observations of the AWSManagedControlPlane initialization process. +// +kubebuilder:validation:MinProperties=1 +type AWSManagedControlPlaneInitializationStatus struct { + // InfrastructureProvisioned is true when the infrastructure provider reports that the control plane's infrastructure is fully provisioned. + // NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Cluster provisioning. + // The value of this field is never updated after provisioning is completed. + // Use conditions to monitor the operational state of the control plane's infrastructure. + // +optional + InfrastructureProvisioned bool `json:"infrastructureProvisioned"` +} + // AWSManagedControlPlaneStatus defines the observed state of an Amazon EKS Cluster. type AWSManagedControlPlaneStatus struct { // Networks holds details about the AWS networking resources used by the control plane // +optional Network infrav1.NetworkStatus `json:"networkStatus,omitempty"` - // FailureDomains specifies a list fo available availability zones that can be used + // FailureDomains specifies a list of available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains []clusterv1.FailureDomain `json:"failureDomains,omitempty"` // Bastion holds details of the instance that is used as a bastion jump box // +optional Bastion *infrav1.Instance `json:"bastion,omitempty"` @@ -291,20 +302,23 @@ type AWSManagedControlPlaneStatus struct { // is managed by an external service such as AKS, EKS, GKE, etc. // +kubebuilder:default=true ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"` - // Initialized denotes whether or not the control plane has the + // Initialization denotes whether or not the control plane has the // uploaded kubernetes config-map. 
// +optional - Initialized bool `json:"initialized"` + Initialization AWSManagedControlPlaneInitializationStatus `json:"initialization,omitempty,omitzero"` + // Ready denotes that the AWSManagedControlPlane API Server is ready to // receive requests and that the VPC infra is ready. // +kubebuilder:default=false Ready bool `json:"ready"` - // ErrorMessage indicates that there is a terminal problem reconciling the - // state, and will be set to a descriptive error message. + + // Conditions specifies the conditions for the managed control plane // +optional - FailureMessage *string `json:"failureMessage,omitempty"` - // Conditions specifies the cpnditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` + // Addons holds the current status of the EKS addons // +optional Addons []AddonState `json:"addons,omitempty"` @@ -337,6 +351,12 @@ type AWSManagedControlPlane struct { Status AWSManagedControlPlaneStatus `json:"status,omitempty"` } +// GetConditions returns the AWSManagedControlPlane status conditions. +func (r *AWSManagedControlPlane) GetConditions() []metav1.Condition { return r.Status.Conditions } + +// SetConditions sets the AWSManagedControlPlane status conditions. +func (r *AWSManagedControlPlane) SetConditions(conditions []metav1.Condition) { + r.Status.Conditions = conditions +} + // +kubebuilder:object:root=true // AWSManagedControlPlaneList contains a list of Amazon EKS Managed Control Planes. @@ -346,16 +366,6 @@ type AWSManagedControlPlaneList struct { Items []AWSManagedControlPlane `json:"items"` } -// GetConditions returns the control planes conditions. -func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { - return r.Status.Conditions -} - -// SetConditions sets the status conditions for the AWSManagedControlPlane. -func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { - r.Status.Conditions = conditions -} - func init() { SchemeBuilder.Register(&AWSManagedControlPlane{}, &AWSManagedControlPlaneList{}) } diff --git a/controlplane/eks/api/v1beta2/conditions_consts.go b/controlplane/eks/api/v1beta2/conditions_consts.go index fc8fa66721..8b7b01ae7d 100644 --- a/controlplane/eks/api/v1beta2/conditions_consts.go +++ b/controlplane/eks/api/v1beta2/conditions_consts.go @@ -16,45 +16,43 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" + EKSControlPlaneReadyCondition = "EKSControlPlaneReady" // EKSControlPlaneCreatingCondition condition reports on whether the eks // control plane is creating. - EKSControlPlaneCreatingCondition clusterv1.ConditionType = "EKSControlPlaneCreating" + EKSControlPlaneCreatingCondition = "EKSControlPlaneCreating" // EKSControlPlaneUpdatingCondition condition reports on whether the eks // control plane is updating. - EKSControlPlaneUpdatingCondition clusterv1.ConditionType = "EKSControlPlaneUpdating" + EKSControlPlaneUpdatingCondition = "EKSControlPlaneUpdating" // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed" ) const ( // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles. 
- IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady" + IAMControlPlaneRolesReadyCondition = "IAMControlPlaneRolesReady" // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles. IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed" ) const ( // IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config. - IAMAuthenticatorConfiguredCondition clusterv1.ConditionType = "IAMAuthenticatorConfigured" + IAMAuthenticatorConfiguredCondition = "IAMAuthenticatorConfigured" // IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config. IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed" ) const ( // EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons. - EKSAddonsConfiguredCondition clusterv1.ConditionType = "EKSAddonsConfigured" + EKSAddonsConfiguredCondition = "EKSAddonsConfigured" // EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons. EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed" ) const ( // EKSIdentityProviderConfiguredCondition condition reports on the successful association of identity provider config. - EKSIdentityProviderConfiguredCondition clusterv1.ConditionType = "EKSIdentityProviderConfigured" + EKSIdentityProviderConfiguredCondition = "EKSIdentityProviderConfigured" // EKSIdentityProviderConfiguredFailedReason used to report failures while reconciling the identity provider config association. EKSIdentityProviderConfiguredFailedReason = "EKSIdentityProviderConfiguredFailed" ) diff --git a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go index 807613dc0d..c7bb9be6f6 100644 --- a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go @@ -22,9 +22,10 @@ package v1beta2 import ( "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -54,6 +55,21 @@ func (in *AWSManagedControlPlane) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedControlPlaneInitializationStatus) DeepCopyInto(out *AWSManagedControlPlaneInitializationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedControlPlaneInitializationStatus. +func (in *AWSManagedControlPlaneInitializationStatus) DeepCopy() *AWSManagedControlPlaneInitializationStatus { + if in == nil { + return nil + } + out := new(AWSManagedControlPlaneInitializationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSManagedControlPlaneList) DeepCopyInto(out *AWSManagedControlPlaneList) { *out = *in @@ -190,9 +206,9 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]corev1beta2.FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Bastion != nil { @@ -206,14 +222,10 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane *out = new(bool) **out = **in } - if in.FailureMessage != nil { - in, out := &in.FailureMessage, &out.FailureMessage - *out = new(string) - **out = **in - } + out.Initialization = in.Initialization if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 1a3a3583d5..927a80dcdb 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -22,8 +22,10 @@ import ( "strings" "time" + "github.com/aws/smithy-go/ptr" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -51,10 +53,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -178,7 +180,7 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, awsManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &ekscontrolplanev1.AWSManagedControlPlane{})), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -258,7 +260,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct // Always close the scope defer func() { - applicableConditions := []clusterv1.ConditionType{ + forConditionTypes := conditions.ForConditionTypes{ ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, @@ -269,21 +271,32 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct } if managedScope.VPC().IsManaged(managedScope.Name()) { - applicableConditions = append(applicableConditions, + forConditionTypes = append(forConditionTypes, 
infrav1.InternetGatewayReadyCondition, infrav1.NatGatewaysReadyCondition, infrav1.RouteTablesReadyCondition, infrav1.VpcEndpointsReadyCondition, ) if managedScope.Bastion().Enabled { - applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition) + forConditionTypes = append(forConditionTypes, infrav1.BastionHostReadyCondition) } if managedScope.VPC().IsIPv6Enabled() { - applicableConditions = append(applicableConditions, infrav1.EgressOnlyInternetGatewayReadyCondition) + forConditionTypes = append(forConditionTypes, infrav1.EgressOnlyInternetGatewayReadyCondition) } } - conditions.SetSummary(managedScope.ControlPlane, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) + summaryOpts := []conditions.SummaryOption{ + forConditionTypes, + } + + readyCondition, err := conditions.NewSummaryCondition(managedScope.ControlPlane, clusterv1.ReadyCondition, summaryOpts...) + if err != nil { + readyCondition = &metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: ekscontrolplanev1.EKSControlPlaneReconciliationFailedReason, + Message: err.Error(), + } + } + conditions.Set(managedScope.ControlPlane, *readyCondition) if err := managedScope.Close(); err != nil && reterr == nil { reterr = err @@ -302,7 +315,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) { managedScope.Info("Reconciling AWSManagedControlPlane") - if managedScope.Cluster.Spec.InfrastructureRef == nil { + if !managedScope.Cluster.Spec.InfrastructureRef.IsDefined() { managedScope.Info("InfrastructureRef not set, skipping reconciliation") return ctrl.Result{}, nil } @@ -312,7 +325,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, // infrastructureRef and controlplaneRef. 
if managedScope.Cluster.Spec.InfrastructureRef.Kind != awsManagedControlPlaneKind { // Wait for the cluster infrastructure to be ready before creating machines - if !managedScope.Cluster.Status.InfrastructureReady { + if managedScope.Cluster.Status.Initialization.InfrastructureProvisioned == nil || !*managedScope.Cluster.Status.Initialization.InfrastructureProvisioned { managedScope.Info("Cluster infrastructure is not ready yet") return ctrl.Result{RequeueAfter: r.WaitInfraPeriod}, nil } @@ -339,12 +352,22 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := sgService.ReconcileSecurityGroups(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(awsManagedControlPlane, metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.ClusterSecurityGroupReconciliationFailedReason, + Message: err.Error(), + }) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile general security groups for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(awsManagedControlPlane, metav1.Condition{ + Type: infrav1.BastionHostReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.BastionHostFailedReason, + Message: err.Error(), + }) return reconcile.Result{}, fmt.Errorf("failed to reconcile bastion host for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -353,7 +376,12 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := awsnodeService.ReconcileCNI(ctx); err != nil { - conditions.MarkFalse(managedScope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(awsManagedControlPlane, metav1.Condition{ + Type: infrav1.SecondaryCidrsReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.SecondaryCidrReconciliationFailedReason, + Message: err.Error(), + }) return reconcile.Result{}, fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -369,14 +397,22 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } } if err := authService.ReconcileIAMAuthenticator(ctx); err != nil { - conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(awsManagedControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, + Message: err.Error(), + }) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } - conditions.MarkTrue(awsManagedControlPlane, 
ekscontrolplanev1.IAMAuthenticatorConfiguredCondition) + conditions.Set(awsManagedControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, + Status: metav1.ConditionTrue, + Reason: "IAMAuthenticatorConfigured", + }) for _, subnet := range managedScope.Subnets().FilterPrivate() { - managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ - ControlPlane: true, + managedScope.SetFailureDomain(subnet.AvailabilityZone, []clusterv1.FailureDomain{ + {Name: subnet.AvailabilityZone, ControlPlane: ptr.Bool(true)}, }) } @@ -451,8 +487,11 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli } controlPlaneRef := c.Spec.ControlPlaneRef - if controlPlaneRef != nil && controlPlaneRef.Kind == awsManagedControlPlaneKind { - return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} + if controlPlaneRef.IsDefined() && controlPlaneRef.Kind == awsManagedControlPlaneKind { + return []ctrl.Request{{NamespacedName: client.ObjectKey{ + Name: controlPlaneRef.Name, + Namespace: c.Namespace, + }}} } return nil @@ -522,7 +561,7 @@ func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ } controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != awsManagedControlPlaneKind { + if !controlPlaneRef.IsDefined() || controlPlaneRef.Kind != awsManagedControlPlaneKind { log.Debug("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping") return nil } @@ -531,7 +570,7 @@ func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ { NamespacedName: types.NamespacedName{ Name: controlPlaneRef.Name, - Namespace: controlPlaneRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go index 483992024d..fa7c9cc508 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go @@ -56,7 +56,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" ) @@ -161,7 +161,7 @@ func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { - awsManagedControlPlane.Status.Conditions = clusterv1.Conditions{ + awsManagedControlPlane.Status.Conditions = []metav1.Condition{ { Type: "Paused", - Status: corev1.ConditionFalse, + Status: metav1.ConditionFalse, Reason: "NotPaused", }, } diff --git a/controlplane/eks/controllers/helpers_test.go b/controlplane/eks/controllers/helpers_test.go index e79c2265b0..a49ca22265 100644 --- a/controlplane/eks/controllers/helpers_test.go +++ b/controlplane/eks/controllers/helpers_test.go @@ -25,7 +25,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func getAWSManagedControlPlaneScope(cluster *clusterv1.Cluster, awsManagedControlPlane *ekscontrolplanev1.AWSManagedControlPlane) 
*scope.ManagedControlPlaneScope { diff --git a/controlplane/eks/controllers/suite_test.go b/controlplane/eks/controllers/suite_test.go index c284f3dec2..a4e82ed89e 100644 --- a/controlplane/eks/controllers/suite_test.go +++ b/controlplane/eks/controllers/suite_test.go @@ -29,7 +29,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( diff --git a/controlplane/rosa/api/v1beta2/conditions_consts.go b/controlplane/rosa/api/v1beta2/conditions_consts.go index 8bb0f50427..bbf894654d 100644 --- a/controlplane/rosa/api/v1beta2/conditions_consts.go +++ b/controlplane/rosa/api/v1beta2/conditions_consts.go @@ -16,20 +16,18 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - const ( // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane. - ROSAControlPlaneReadyCondition clusterv1.ConditionType = "ROSAControlPlaneReady" + ROSAControlPlaneReadyCondition = "ROSAControlPlaneReady" // ROSAControlPlaneValidCondition condition reports whether ROSAControlPlane configuration is valid. - ROSAControlPlaneValidCondition clusterv1.ConditionType = "ROSAControlPlaneValid" + ROSAControlPlaneValidCondition = "ROSAControlPlaneValid" // ROSAControlPlaneUpgradingCondition condition reports whether ROSAControlPlane is upgrading or not. - ROSAControlPlaneUpgradingCondition clusterv1.ConditionType = "ROSAControlPlaneUpgrading" + ROSAControlPlaneUpgradingCondition = "ROSAControlPlaneUpgrading" // ExternalAuthConfiguredCondition condition reports whether external auth has beed correctly configured. - ExternalAuthConfiguredCondition clusterv1.ConditionType = "ExternalAuthConfigured" + ExternalAuthConfiguredCondition = "ExternalAuthConfigured" // ReconciliationFailedReason used to report reconciliation failures. ReconciliationFailedReason = "ReconciliationFailed" diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go index c8a99cea6a..2b1abf94ad 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go @@ -22,7 +22,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // RosaEndpointAccessType specifies the publishing scope of cluster endpoints. @@ -721,32 +721,32 @@ type AWSRolesRef struct { KMSProviderARN string `json:"kmsProviderARN"` } +// RosaControlPlaneInitializationStatus provides observations of the ROSAControlPlane initialization process. +// +kubebuilder:validation:MinProperties=1 +type RosaControlPlaneInitializationStatus struct { + // controlPlaneInitialized is true when the control plane provider reports that the Kubernetes control plane is initialized; + // usually a control plane is considered initialized when it can accept requests, no matter if this happens before + // the control plane is fully provisioned or not. + // NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Cluster provisioning. 
+ // +optional + ControlPlaneInitialized bool `json:"controlPlaneInitialized"` +} + // RosaControlPlaneStatus defines the observed state of ROSAControlPlane. type RosaControlPlaneStatus struct { // ExternalManagedControlPlane indicates to cluster-api that the control plane // is managed by an external service such as AKS, EKS, GKE, etc. // +kubebuilder:default=true ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"` + - // Initialized denotes whether or not the control plane has the + // Initialization denotes whether or not the control plane has the // uploaded kubernetes config-map. // +optional - Initialized bool `json:"initialized"` - // Ready denotes that the ROSAControlPlane API Server is ready to receive requests. - // +kubebuilder:default=false - Ready bool `json:"ready"` - // FailureMessage will be set in the event that there is a terminal problem - // reconciling the state and will be set to a descriptive error message. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the spec or the configuration of - // the controller, and that manual intervention is required. - // - // +optional - FailureMessage *string `json:"failureMessage,omitempty"` + Initialization RosaControlPlaneInitializationStatus `json:"initialization,omitempty,omitzero"` + // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // ID is the cluster ID given by ROSA. ID string `json:"id,omitempty"` @@ -790,12 +790,12 @@ type ROSAControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *ROSAControlPlane) GetConditions() clusterv1.Conditions { +func (r *ROSAControlPlane) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the status conditions for the AWSManagedControlPlane. -func (r *ROSAControlPlane) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the status conditions for the ROSAControlPlane. +func (r *ROSAControlPlane) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go index 3e4dfdf8cf..86e99e5d38 100644 --- a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go @@ -22,10 +22,10 @@ package v1beta2 import ( "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -298,6 +298,21 @@ func (in *RegistrySources) DeepCopy() *RegistrySources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RosaControlPlaneInitializationStatus) DeepCopyInto(out *RosaControlPlaneInitializationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaControlPlaneInitializationStatus. 
+func (in *RosaControlPlaneInitializationStatus) DeepCopy() *RosaControlPlaneInitializationStatus { + if in == nil { + return nil + } + out := new(RosaControlPlaneInitializationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RosaControlPlaneSpec) DeepCopyInto(out *RosaControlPlaneSpec) { *out = *in @@ -368,14 +383,10 @@ func (in *RosaControlPlaneStatus) DeepCopyInto(out *RosaControlPlaneStatus) { *out = new(bool) **out = **in } - if in.FailureMessage != nil { in, out := &in.FailureMessage, &out.FailureMessage *out = new(string) **out = **in - } + out.Initialization = in.Initialization if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index 90426d2aab..4753f42c26 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -64,11 +64,11 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" + "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" ) @@ -116,7 +116,7 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, rosaControlPlane.GroupVersionKind(), mgr.GetClient(), &expinfrav1.ROSACluster{})), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -232,18 +232,20 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{}, fmt.Errorf("failed to validate ROSAControlPlane.spec: %w", err) } - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition) + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ROSAControlPlaneValidCondition, + Status: metav1.ConditionTrue, + Reason: "Valid", + }) if validationMessage != "" { - conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneValidCondition, - rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, - clusterv1.ConditionSeverityError, - "%s", - validationMessage) - // dont' requeue because input is invalid and manual intervention is needed. 
+ conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ROSAControlPlaneValidCondition, + Status: metav1.ConditionFalse, + Reason: rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, + Message: validationMessage, + }) + // don't requeue because input is invalid and manual intervention is needed. return ctrl.Result{}, nil } - rosaScope.ControlPlane.Status.FailureMessage = nil cluster, err := ocmClient.GetCluster(rosaScope.ControlPlane.Spec.RosaClusterName, creator) if err != nil && weberr.GetType(err) != weberr.NotFound { @@ -254,19 +256,22 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc rosaScope.ControlPlane.Status.ID = cluster.ID() rosaScope.ControlPlane.Status.ConsoleURL = cluster.Console().URL() rosaScope.ControlPlane.Status.OIDCEndpointURL = cluster.AWS().STS().OIDCEndpointURL() - rosaScope.ControlPlane.Status.Ready = false + rosaScope.ControlPlane.Status.Initialization.ControlPlaneInitialized = false rosaScope.ControlPlane.Status.Version = rosa.RawVersionID(cluster.Version()) switch cluster.Status().State() { case cmv1.ClusterStateReady: - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition) - rosaScope.ControlPlane.Status.Ready = true + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ROSAControlPlaneReadyCondition, + Status: metav1.ConditionTrue, + Reason: "Ready", + }) + rosaScope.ControlPlane.Status.Initialization.ControlPlaneInitialized = true apiEndpoint, err := buildAPIEndpoint(cluster) if err != nil { return ctrl.Result{}, err } - rosaScope.ControlPlane.Spec.ControlPlaneEndpoint = *apiEndpoint + rosaScope.ControlPlane.Spec.ControlPlaneEndpoint = apiEndpoint if err := r.updateOCMCluster(rosaScope, ocmClient, cluster, creator); err != nil { return ctrl.Result{}, fmt.Errorf("failed to update rosa control plane: %w", err) @@ -290,24 +295,23 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{}, nil case cmv1.ClusterStateError: errorMessage := cluster.Status().ProvisionErrorMessage() - rosaScope.ControlPlane.Status.FailureMessage = &errorMessage - - conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, - string(cluster.Status().State()), - clusterv1.ConditionSeverityError, - "%s", - cluster.Status().ProvisionErrorCode()) + + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ROSAControlPlaneReadyCondition, + Status: metav1.ConditionFalse, + Reason: string(cluster.Status().State()), + Message: fmt.Sprintf("%s: %s", cluster.Status().ProvisionErrorCode(), errorMessage), + }) // Cluster is in an unrecoverable state, returning nil error so that the request doesn't get requeued. return ctrl.Result{}, nil } - conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, - string(cluster.Status().State()), - clusterv1.ConditionSeverityInfo, - "%s", - cluster.Status().Description()) + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ROSAControlPlaneReadyCondition, + Status: metav1.ConditionFalse, + Reason: string(cluster.Status().State()), + Message: cluster.Status().Description(), + }) rosaScope.Info("waiting for cluster to become ready", "state", cluster.Status().State()) // Requeue so that status.ready is set to true when the cluster is fully created. 
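Every v1beta1 conditions.MarkFalse(obj, condType, reason, severity, format, args...) call in these controllers collapses into the same v1beta2 conditions.Set shape, with severity dropped because metav1.Condition has no such field. If the repetition grows, it could be factored into a small wrapper; a sketch under the assumption that conditions.Setter is the setter interface of the v1beta2 sigs.k8s.io/cluster-api/util/conditions package (the helper name is illustrative, not part of this patch):

    // markFalse mirrors the old MarkFalse ergonomics on top of the v1beta2 conditions API.
    func markFalse(obj conditions.Setter, conditionType, reason, message string) {
    	conditions.Set(obj, metav1.Condition{
    		Type:    conditionType,
    		Status:  metav1.ConditionFalse,
    		Reason:  reason,
    		Message: message,
    	})
    }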
@@ -321,12 +325,12 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 
 	cluster, err = ocmClient.CreateCluster(ocmClusterSpec)
 	if err != nil {
-		conditions.MarkFalse(rosaScope.ControlPlane,
-			rosacontrolplanev1.ROSAControlPlaneReadyCondition,
-			rosacontrolplanev1.ReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
-			"%s",
-			err.Error())
+		conditions.Set(rosaScope.ControlPlane, metav1.Condition{
+			Type:    rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+			Status:  metav1.ConditionFalse,
+			Reason:  rosacontrolplanev1.ReconciliationFailedReason,
+			Message: err.Error(),
+		})
 		return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err)
 	}
 
@@ -382,23 +386,22 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc
 
 	if cluster.Status().State() != cmv1.ClusterStateUninstalling {
 		if _, err := ocmClient.DeleteCluster(cluster.ID(), bestEffort, creator); err != nil {
-			conditions.MarkFalse(rosaScope.ControlPlane,
-				rosacontrolplanev1.ROSAControlPlaneReadyCondition,
-				rosacontrolplanev1.ROSAControlPlaneDeletionFailedReason,
-				clusterv1.ConditionSeverityError,
-				"failed to delete ROSAControlPlane: %s; if the error can't be resolved, set '%s' annotation to force the deletion",
-				err.Error(),
-				ROSAControlPlaneForceDeleteAnnotation)
+			conditions.Set(rosaScope.ControlPlane, metav1.Condition{
+				Type:    rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+				Status:  metav1.ConditionFalse,
+				Reason:  rosacontrolplanev1.ROSAControlPlaneDeletionFailedReason,
+				Message: fmt.Sprintf("failed to delete ROSAControlPlane: %s; if the error can't be resolved, set '%s' annotation to force the deletion", err, ROSAControlPlaneForceDeleteAnnotation),
+			})
 			return ctrl.Result{}, err
 		}
 	}
 
-	conditions.MarkFalse(rosaScope.ControlPlane,
-		rosacontrolplanev1.ROSAControlPlaneReadyCondition,
-		string(cluster.Status().State()),
-		clusterv1.ConditionSeverityInfo,
-		"deleting")
-	rosaScope.ControlPlane.Status.Ready = false
+	conditions.Set(rosaScope.ControlPlane, metav1.Condition{
+		Type:    rosacontrolplanev1.ROSAControlPlaneReadyCondition,
+		Status:  metav1.ConditionFalse,
+		Reason:  string(cluster.Status().State()),
+		Message: "deleting",
+	})
 
 	rosaScope.Info("waiting for cluster to be deleted")
 	// Requeue to remove the finalizer when the cluster is fully deleted.
 	return ctrl.Result{RequeueAfter: time.Second * 60}, nil
@@ -440,7 +443,11 @@ func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, ros
 func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster) error {
 	version := rosaScope.ControlPlane.Spec.Version
 	if version == rosa.RawVersionID(cluster.Version()) {
-		conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(rosaScope.ControlPlane, metav1.Condition{
+			Type:   rosacontrolplanev1.ROSAControlPlaneUpgradingCondition,
+			Status: metav1.ConditionFalse,
+			Reason: "upgraded",
+		})
 
 		if cluster.Version() != nil {
 			rosaScope.ControlPlane.Status.AvailableUpgrades = cluster.Version().AvailableUpgrades()
@@ -464,9 +471,9 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO
 		ack := (rosaScope.ControlPlane.Spec.VersionGate == rosacontrolplanev1.Acknowledge || rosaScope.ControlPlane.Spec.VersionGate == rosacontrolplanev1.AlwaysAcknowledge)
 		scheduledUpgrade, err = rosa.ScheduleControlPlaneUpgrade(ocmClient, cluster, version, time.Now(), ack)
 		if err != nil {
-			condition := &clusterv1.Condition{
+			condition := metav1.Condition{
 				Type:    rosacontrolplanev1.ROSAControlPlaneUpgradingCondition,
-				Status:  corev1.ConditionFalse,
+				Status:  metav1.ConditionFalse,
 				Reason:  "failed",
 				Message: fmt.Sprintf("failed to schedule upgrade to version %s: %v", version, err),
 			}
@@ -476,9 +483,9 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO
 		}
 	}
 
-	condition := &clusterv1.Condition{
+	condition := metav1.Condition{
 		Type:    rosacontrolplanev1.ROSAControlPlaneUpgradingCondition,
-		Status:  corev1.ConditionTrue,
+		Status:  metav1.ConditionTrue,
 		Reason:  string(scheduledUpgrade.State().Value()),
 		Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()),
 	}
@@ -499,12 +506,12 @@ func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAContr
 	// Update the cluster.
rosaScope.Info("Updating cluster") if err := ocmClient.UpdateCluster(cluster.ID(), creator, ocmClusterSpec); err != nil { - conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneValidCondition, - rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, - clusterv1.ConditionSeverityError, - "%s", - err.Error()) + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ROSAControlPlaneValidCondition, + Status: metav1.ConditionFalse, + Reason: rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, + Message: fmt.Sprintf("%s", err), + }) return err } } @@ -591,14 +598,18 @@ func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context, var errs []error if err := r.reconcileExternalAuthProviders(ctx, externalAuthClient, rosaScope, cluster); err != nil { errs = append(errs, err) - conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ExternalAuthConfiguredCondition, - rosacontrolplanev1.ReconciliationFailedReason, - clusterv1.ConditionSeverityError, - "%s", - err.Error()) + + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ExternalAuthConfiguredCondition, + Status: metav1.ConditionFalse, + Reason: rosacontrolplanev1.ReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) } else { - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition) + conditions.Set(rosaScope.ControlPlane, metav1.Condition{ + Type: rosacontrolplanev1.ExternalAuthConfiguredCondition, + Status: metav1.ConditionTrue, + }) } if err := r.reconcileExternalAuthBootstrapKubeconfig(ctx, externalAuthClient, rosaScope, cluster); err != nil { @@ -877,7 +888,7 @@ func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, ro } } - rosaScope.ControlPlane.Status.Initialized = true + rosaScope.ControlPlane.Status.Initialization.ControlPlaneInitialized = true return nil } @@ -1111,38 +1122,39 @@ func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.L } controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != rosaControlPlaneKind { + if controlPlaneRef.IsDefined() || controlPlaneRef.Kind != rosaControlPlaneKind { log.Debug("ControlPlaneRef is nil or not ROSAControlPlane, skipping mapping") return nil } + // TODO(@tobbbles): Evaluate which namesapce to use here, does it come from cluster? 
 		return []ctrl.Request{
 			{
 				NamespacedName: types.NamespacedName{
 					Name:      controlPlaneRef.Name,
-					Namespace: controlPlaneRef.Namespace,
+					Namespace: cluster.Namespace,
 				},
 			},
 		}
 	}
 }
 
-func buildAPIEndpoint(cluster *cmv1.Cluster) (*clusterv1.APIEndpoint, error) {
+func buildAPIEndpoint(cluster *cmv1.Cluster) (clusterv1.APIEndpoint, error) {
 	parsedURL, err := url.ParseRequestURI(cluster.API().URL())
 	if err != nil {
-		return nil, err
+		return clusterv1.APIEndpoint{}, err
 	}
 	host, portStr, err := net.SplitHostPort(parsedURL.Host)
 	if err != nil {
-		return nil, err
+		return clusterv1.APIEndpoint{}, err
 	}
 
 	port, err := strconv.Atoi(portStr)
 	if err != nil {
-		return nil, err
+		return clusterv1.APIEndpoint{}, err
 	}
 
-	return &clusterv1.APIEndpoint{
+	return clusterv1.APIEndpoint{
 		Host: host,
 		Port: int32(port), //#nosec G109 G115
 	}, nil
diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go
index 61b8f9ce52..c5304c99b7 100644
--- a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go
+++ b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go
@@ -52,8 +52,8 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
 
diff --git a/controlplane/rosa/controllers/suite_test.go b/controlplane/rosa/controllers/suite_test.go
index ebdfce2a76..eeeb4e6323 100644
--- a/controlplane/rosa/controllers/suite_test.go
+++ b/controlplane/rosa/controllers/suite_test.go
@@ -32,7 +32,7 @@ import (
 	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 var (
diff --git a/exp/api/v1beta1/awsfargateprofile_types.go b/exp/api/v1beta1/awsfargateprofile_types.go
index 155ab4915a..283eb13c42 100644
--- a/exp/api/v1beta1/awsfargateprofile_types.go
+++ b/exp/api/v1beta1/awsfargateprofile_types.go
@@ -23,7 +23,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 var (
diff --git a/exp/api/v1beta1/awsmachinepool_types.go b/exp/api/v1beta1/awsmachinepool_types.go
index fc70422c03..912b3f7bf4 100644
--- a/exp/api/v1beta1/awsmachinepool_types.go
+++ b/exp/api/v1beta1/awsmachinepool_types.go
@@ -21,7 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // Constants block.
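buildAPIEndpoint now returns a clusterv1.APIEndpoint by value, so the error paths hand back the zero value instead of nil. The host/port extraction itself is unchanged; a self-contained sketch of that parsing logic, with a made-up cluster URL:

package main

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
)

// parseAPIEndpoint mirrors the steps in buildAPIEndpoint: validate the URL,
// split host from port, and parse the port into an integer.
func parseAPIEndpoint(raw string) (host string, port int32, err error) {
	parsed, err := url.ParseRequestURI(raw)
	if err != nil {
		return "", 0, err
	}
	h, portStr, err := net.SplitHostPort(parsed.Host)
	if err != nil {
		return "", 0, err
	}
	p, err := strconv.Atoi(portStr)
	if err != nil {
		return "", 0, err
	}
	return h, int32(p), nil //#nosec G109 G115 -- same overflow caveat as the original
}

func main() {
	host, port, err := parseAPIEndpoint("https://api.example.openshiftapps.com:6443")
	if err != nil {
		panic(err)
	}
	fmt.Println(host, port) // api.example.openshiftapps.com 6443
}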
diff --git a/exp/api/v1beta1/awsmanagedmachinepool_types.go b/exp/api/v1beta1/awsmanagedmachinepool_types.go index bd9632f95b..c7cab162bf 100644 --- a/exp/api/v1beta1/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta1/awsmanagedmachinepool_types.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go index 534ebb2bf9..f91551e0cf 100644 --- a/exp/api/v1beta1/conditions_consts.go +++ b/exp/api/v1beta1/conditions_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned. diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index 933a08f716..ff8f0c8ed2 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -24,11 +24,12 @@ package v1beta1 import ( unsafe "unsafe" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -585,33 +586,46 @@ func autoConvert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *v1 } func autoConvert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta2.AWSMachinePoolStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.Replicas = in.Replicas - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.Ready requires manual conversion: does not exist in peer-type + // WARNING: in.Replicas requires manual conversion: does not exist in peer-type + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert corev1beta1.Condition to v1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } out.Instances = *(*[]v1beta2.AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) - out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) + // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type + // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type out.ASGStatus = (*v1beta2.ASGStatus)(unsafe.Pointer(in.ASGStatus)) return nil } -// Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus is an autogenerated conversion function. 
-func Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta2.AWSMachinePoolStatus, s conversion.Scope) error { - return autoConvert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in, out, s) -} - func autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *v1beta2.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.Replicas = in.Replicas - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.ReadyReplicas requires manual conversion: does not exist in peer-type + // WARNING: in.AvailableReplicas requires manual conversion: does not exist in peer-type + // WARNING: in.UpToDateReplicas requires manual conversion: does not exist in peer-type + // WARNING: in.Initialization requires manual conversion: does not exist in peer-type + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(corev1beta1.Conditions, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert v1.Condition to corev1beta1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) // WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type - out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.ASGStatus = (*ASGStatus)(unsafe.Pointer(in.ASGStatus)) return nil } @@ -770,7 +784,16 @@ func autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachin out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert corev1beta1.Condition to v1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } @@ -786,7 +809,16 @@ func autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachin out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(corev1beta1.Conditions, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert v1.Condition to corev1beta1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } @@ -915,22 +947,33 @@ func autoConvert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(in *v1 func autoConvert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in *FargateProfileStatus, out *v1beta2.FargateProfileStatus, s conversion.Scope) error { out.Ready = in.Ready - out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) - out.FailureMessage = 
(*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type + // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert corev1beta1.Condition to v1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } -// Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus is an autogenerated conversion function. -func Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in *FargateProfileStatus, out *v1beta2.FargateProfileStatus, s conversion.Scope) error { - return autoConvert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in, out, s) -} - func autoConvert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in *v1beta2.FargateProfileStatus, out *FargateProfileStatus, s conversion.Scope) error { out.Ready = in.Ready - out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) - out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(corev1beta1.Conditions, len(*in)) + for i := range *in { + // FIXME: Provide conversion function to convert v1.Condition to corev1beta1.Condition + compileErrorOnMissingConversion() + } + } else { + out.Conditions = nil + } return nil } diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go index da355ddf67..3919507c2d 100644 --- a/exp/api/v1beta1/zz_generated.deepcopy.go +++ b/exp/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
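The compileErrorOnMissingConversion placeholders above are conversion-gen's way of demanding hand-written helpers between the old clusterv1 (v1beta1) Condition and metav1.Condition. A sketch of the lossy v1beta1-to-metav1 direction, using a locally defined stand-in for the v1beta1 type (the real helper would operate on the actual clusterv1 struct, and cluster-api may ship its own converters): Severity has no v1beta2 counterpart and is dropped, and an empty Reason must be backfilled because metav1.Condition requires one.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// v1beta1Condition is a local stand-in for the fields of the deprecated
// cluster-api v1beta1 Condition that matter for conversion.
type v1beta1Condition struct {
	Type               string
	Status             corev1.ConditionStatus
	Severity           string // dropped: no equivalent field in metav1.Condition
	LastTransitionTime metav1.Time
	Reason             string
	Message            string
}

func convertToMetaV1(in v1beta1Condition) metav1.Condition {
	reason := in.Reason
	if reason == "" {
		reason = "Unknown" // metav1.Condition validation rejects an empty Reason
	}
	return metav1.Condition{
		Type:               in.Type,
		Status:             metav1.ConditionStatus(in.Status),
		LastTransitionTime: in.LastTransitionTime,
		Reason:             reason,
		Message:            in.Message,
	}
}

func main() {
	out := convertToMetaV1(v1beta1Condition{Type: "ASGReady", Status: corev1.ConditionTrue})
	fmt.Printf("%+v\n", out)
}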
@@ -263,7 +263,7 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -490,7 +490,7 @@ func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolSt } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -632,7 +632,7 @@ func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/exp/api/v1beta2/awsfargateprofile_types.go b/exp/api/v1beta2/awsfargateprofile_types.go index 3869fd42fa..d65cc210d5 100644 --- a/exp/api/v1beta2/awsfargateprofile_types.go +++ b/exp/api/v1beta2/awsfargateprofile_types.go @@ -23,7 +23,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) var ( @@ -102,47 +101,9 @@ type FargateProfileStatus struct { // +kubebuilder:default=false Ready bool `json:"ready"` - // FailureReason will be set in the event that there is a terminal problem - // reconciling the FargateProfile and will contain a succinct value suitable - // for machine interpretation. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the FargateProfile's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of - // FargateProfiles can be added as events to the FargateProfile object - // and/or logged in the controller's output. - // +optional - FailureReason *string `json:"failureReason,omitempty"` - - // FailureMessage will be set in the event that there is a terminal problem - // reconciling the FargateProfile and will contain a more verbose string suitable - // for logging and human consumption. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the FargateProfile's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of - // FargateProfiles can be added as events to the FargateProfile - // object and/or logged in the controller's output. 
-	// +optional
-	FailureMessage *string `json:"failureMessage,omitempty"`
-
 	// Conditions defines current state of the Fargate profile.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -163,12 +124,12 @@ type AWSFargateProfile struct {
 }
 
 // GetConditions returns the observations of the operational state of the AWSFargateProfile resource.
-func (r *AWSFargateProfile) GetConditions() clusterv1.Conditions {
+func (r *AWSFargateProfile) GetConditions() []metav1.Condition {
 	return r.Status.Conditions
 }
 
-// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1.Conditions.
-func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed []metav1.Condition.
+func (r *AWSFargateProfile) SetConditions(conditions []metav1.Condition) {
 	r.Status.Conditions = conditions
 }
 
diff --git a/exp/api/v1beta2/awsfargateprofile_webhook.go b/exp/api/v1beta2/awsfargateprofile_webhook.go
index ed38ff73ae..8bb46b90b1 100644
--- a/exp/api/v1beta2/awsfargateprofile_webhook.go
+++ b/exp/api/v1beta2/awsfargateprofile_webhook.go
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
diff --git a/exp/api/v1beta2/awsfargateprofile_webhook_test.go b/exp/api/v1beta2/awsfargateprofile_webhook_test.go
index 7849e0bb35..679e2aab97 100644
--- a/exp/api/v1beta2/awsfargateprofile_webhook_test.go
+++ b/exp/api/v1beta2/awsfargateprofile_webhook_test.go
@@ -27,7 +27,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
 	utildefaulting "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 func TestAWSFargateProfileDefault(t *testing.T) {
diff --git a/exp/api/v1beta2/awsmachinepool_types.go b/exp/api/v1beta2/awsmachinepool_types.go
index ef0a219513..942a8412a3 100644
--- a/exp/api/v1beta2/awsmachinepool_types.go
+++ b/exp/api/v1beta2/awsmachinepool_types.go
@@ -23,7 +23,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
 // Constants block.
@@ -192,19 +191,47 @@ type RefreshPreferences struct {
 	MaxHealthyPercentage *int64 `json:"maxHealthyPercentage,omitempty"`
 }
 
+// AWSMachinePoolInitializationStatus provides observations of the AWSMachinePool initialization process.
+type AWSMachinePoolInitializationStatus struct {
+	// BootstrapDataSecretCreated is true when the bootstrap provider reports that the MachinePool's bootstrap data secret is created.
+	// NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial MachinePool provisioning.
+	// The value of this field is never updated after provisioning is completed.
+	// Use conditions to monitor the operational state of the MachinePool's BootstrapSecret.
+	// +optional
+	BootstrapDataSecretCreated bool `json:"bootstrapDataSecretCreated"`
+
+	// InfrastructureProvisioned is true when the infrastructure provider reports that the MachinePool's infrastructure is fully provisioned.
+	// NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial MachinePool provisioning.
+	// The value of this field is never updated after provisioning is completed.
+	// Use conditions to monitor the operational state of the MachinePool's infrastructure.
+	// +optional
+	InfrastructureProvisioned bool `json:"infrastructureProvisioned"`
+}
+
 // AWSMachinePoolStatus defines the observed state of AWSMachinePool.
 type AWSMachinePoolStatus struct {
-	// Ready is true when the provider resource is ready.
+	// The number of ready replicas for this MachinePool. A machine is considered ready when Machine's Ready condition is true.
+	// +optional
+	ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+
+	// The number of available replicas for this MachinePool. A machine is considered available when Machine's Available condition is true.
+	// +optional
+	AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+
+	// The number of up-to-date replicas targeted by this MachinePool. A machine is considered up-to-date when Machine's UpToDate condition is true.
 	// +optional
-	Ready bool `json:"ready"`
+	UpToDateReplicas *int32 `json:"upToDateReplicas,omitempty"`
 
-	// Replicas is the most recently observed number of replicas
+	// Initialization provides observations of the MachinePool initialization process.
+	// NOTE: Fields in this struct are part of the Cluster API contract and are used to orchestrate initial MachinePool provisioning.
+	// The value of those fields is never updated after provisioning is completed.
+	// Use conditions to monitor the operational state of the MachinePool.
 	// +optional
-	Replicas int32 `json:"replicas"`
+	Initialization AWSMachinePoolInitializationStatus `json:"initialization,omitempty,omitzero"`
 
 	// Conditions defines current service state of the AWSMachinePool.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
 
 	// Instances contains the status for each instance in the pool
 	// +optional
@@ -221,44 +247,6 @@ type AWSMachinePoolStatus struct {
 	// +optional
 	InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"`
 
-	// FailureReason will be set in the event that there is a terminal problem
-	// reconciling the Machine and will contain a succinct value suitable
-	// for machine interpretation.
-	//
-	// This field should not be set for transitive errors that a controller
-	// faces that are expected to be fixed automatically over
-	// time (like service outages), but instead indicate that something is
-	// fundamentally wrong with the Machine's spec or the configuration of
-	// the controller, and that manual intervention is required. Examples
-	// of terminal errors would be invalid combinations of settings in the
-	// spec, values that are unsupported by the controller, or the
-	// responsible controller itself being critically misconfigured.
-	//
-	// Any transient errors that occur during the reconciliation of Machines
-	// can be added as events to the Machine object and/or logged in the
-	// controller's output.
-	// +optional
-	FailureReason *string `json:"failureReason,omitempty"`
-
-	// FailureMessage will be set in the event that there is a terminal problem
-	// reconciling the Machine and will contain a more verbose string suitable
-	// for logging and human consumption.
- // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the Machine's spec or the configuration of - // the controller, and that manual intervention is required. Examples - // of terminal errors would be invalid combinations of settings in the - // spec, values that are unsupported by the controller, or the - // responsible controller itself being critically misconfigured. - // - // Any transient errors that occur during the reconciliation of Machines - // can be added as events to the Machine object and/or logged in the - // controller's output. - // +optional - FailureMessage *string `json:"failureMessage,omitempty"` - ASGStatus *ASGStatus `json:"asgStatus,omitempty"` } @@ -306,12 +294,12 @@ func init() { } // GetConditions returns the observations of the operational state of the AWSMachinePool resource. -func (r *AWSMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSMachinePool) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed []metav1.Condition. +func (r *AWSMachinePool) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go index 0aeb7be0dc..b0dcb4a4fc 100644 --- a/exp/api/v1beta2/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go @@ -23,7 +23,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. @@ -294,7 +293,7 @@ type AWSManagedMachinePoolStatus struct { // Conditions defines current service state of the managed machine pool // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -314,12 +313,12 @@ type AWSManagedMachinePool struct { } // GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource. -func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSManagedMachinePool) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed []metav1.Condition. +func (r *AWSManagedMachinePool) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go index 0f3d8675ca..5a4206c0cd 100644 --- a/exp/api/v1beta2/conditions_consts.go +++ b/exp/api/v1beta2/conditions_consts.go @@ -16,11 +16,9 @@ limitations under the License. 
package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - const ( // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned. - ASGReadyCondition clusterv1.ConditionType = "ASGReady" + ASGReadyCondition = "ASGReady" // ASGNotFoundReason used when the autoscaling group couldn't be retrieved. ASGNotFoundReason = "ASGNotFound" // ASGProvisionFailedReason used for failures during autoscaling group provisioning. @@ -29,7 +27,7 @@ const ( ASGDeletionInProgress = "ASGDeletionInProgress" // LaunchTemplateReadyCondition represents the status of an AWSMachinePool's associated Launch Template. - LaunchTemplateReadyCondition clusterv1.ConditionType = "LaunchTemplateReady" + LaunchTemplateReadyCondition = "LaunchTemplateReady" // LaunchTemplateNotFoundReason is used when an associated Launch Template can't be found. LaunchTemplateNotFoundReason = "LaunchTemplateNotFound" // LaunchTemplateCreateFailedReason used for failures during Launch Template creation. @@ -38,9 +36,9 @@ const ( LaunchTemplateReconcileFailedReason = "LaunchTemplateReconcileFailed" // PreLaunchTemplateUpdateCheckCondition reports if all prerequisite are met for launch template update. - PreLaunchTemplateUpdateCheckCondition clusterv1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess" + PreLaunchTemplateUpdateCheckCondition = "PreLaunchTemplateUpdateCheckSuccess" // PostLaunchTemplateUpdateOperationCondition reports on successfully completes post launch template update operation. - PostLaunchTemplateUpdateOperationCondition clusterv1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess" + PostLaunchTemplateUpdateOperationCondition = "PostLaunchTemplateUpdateOperationSuccess" // PreLaunchTemplateUpdateCheckFailedReason used to report when not all prerequisite are met for launch template update. PreLaunchTemplateUpdateCheckFailedReason = "PreLaunchTemplateUpdateCheckFailed" @@ -48,7 +46,7 @@ const ( PostLaunchTemplateUpdateOperationFailedReason = "PostLaunchTemplateUpdateOperationFailed" // InstanceRefreshStartedCondition reports on successfully starting instance refresh. - InstanceRefreshStartedCondition clusterv1.ConditionType = "InstanceRefreshStarted" + InstanceRefreshStartedCondition = "InstanceRefreshStarted" // InstanceRefreshNotReadyReason used to report instance refresh is not initiated. // If there are instance refreshes that are in progress, then a new instance refresh request will fail. InstanceRefreshNotReadyReason = "InstanceRefreshNotReady" @@ -60,7 +58,7 @@ const ( // AWSMachineDeletionFailed reports if deleting AWSMachines failed. AWSMachineDeletionFailed = "AWSMachineDeletionFailed" // LifecycleHookReadyCondition reports on the status of the lifecycle hook. - LifecycleHookReadyCondition clusterv1.ConditionType = "LifecycleHookReady" + LifecycleHookReadyCondition = "LifecycleHookReady" // LifecycleHookCreationFailedReason used for failures during lifecycle hook creation. LifecycleHookCreationFailedReason = "LifecycleHookCreationFailed" // LifecycleHookUpdateFailedReason used for failures during lifecycle hook update. @@ -71,7 +69,7 @@ const ( const ( // EKSNodegroupReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSNodegroupReadyCondition clusterv1.ConditionType = "EKSNodegroupReady" + EKSNodegroupReadyCondition = "EKSNodegroupReady" // EKSNodegroupReconciliationFailedReason used to report failures while reconciling EKS control plane. 
EKSNodegroupReconciliationFailedReason = "EKSNodegroupReconciliationFailed" // WaitingForEKSControlPlaneReason used when the machine pool is waiting for @@ -81,10 +79,10 @@ const ( const ( // EKSFargateProfileReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSFargateProfileReadyCondition clusterv1.ConditionType = "EKSFargateProfileReady" + EKSFargateProfileReadyCondition = "EKSFargateProfileReady" // EKSFargateCreatingCondition condition reports on whether the fargate // profile is creating. - EKSFargateCreatingCondition clusterv1.ConditionType = "EKSFargateCreating" + EKSFargateCreatingCondition = "EKSFargateCreating" // EKSFargateDeletingCondition used to report that the profile is deleting. EKSFargateDeletingCondition = "EKSFargateDeleting" // EKSFargateReconciliationFailedReason used to report failures while reconciling EKS control plane. @@ -104,13 +102,13 @@ const ( const ( // IAMNodegroupRolesReadyCondition condition reports on the successful // reconciliation of EKS nodegroup iam roles. - IAMNodegroupRolesReadyCondition clusterv1.ConditionType = "IAMNodegroupRolesReady" + IAMNodegroupRolesReadyCondition = "IAMNodegroupRolesReady" // IAMNodegroupRolesReconciliationFailedReason used to report failures while // reconciling EKS nodegroup iam roles. IAMNodegroupRolesReconciliationFailedReason = "IAMNodegroupRolesReconciliationFailed" // IAMFargateRolesReadyCondition condition reports on the successful // reconciliation of EKS nodegroup iam roles. - IAMFargateRolesReadyCondition clusterv1.ConditionType = "IAMFargateRolesReady" + IAMFargateRolesReadyCondition = "IAMFargateRolesReady" // IAMFargateRolesReconciliationFailedReason used to report failures while // reconciling EKS nodegroup iam roles. IAMFargateRolesReconciliationFailedReason = "IAMFargateRolesReconciliationFailed" @@ -118,9 +116,9 @@ const ( const ( // RosaMachinePoolReadyCondition condition reports on the successful reconciliation of rosa machinepool. - RosaMachinePoolReadyCondition clusterv1.ConditionType = "RosaMachinePoolReady" + RosaMachinePoolReadyCondition = "RosaMachinePoolReady" // RosaMachinePoolUpgradingCondition condition reports whether ROSAMachinePool is upgrading or not. - RosaMachinePoolUpgradingCondition clusterv1.ConditionType = "RosaMachinePoolUpgrading" + RosaMachinePoolUpgradingCondition = "RosaMachinePoolUpgrading" // WaitingForRosaControlPlaneReason used when the machine pool is waiting for // ROSA control plane infrastructure to be ready before proceeding. diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go index 3303125d1c..2adefa4670 100644 --- a/exp/api/v1beta2/rosacluster_types.go +++ b/exp/api/v1beta2/rosacluster_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ROSAClusterSpec defines the desired state of ROSACluster. @@ -35,13 +35,13 @@ type ROSAClusterStatus struct { // +optional Ready bool `json:"ready,omitempty"` - // FailureDomains specifies a list fo available availability zones that can be used + // FailureDomains specifies a list of available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains []clusterv1.FailureDomain `json:"failureDomains,omitempty"` // Conditions defines current service state of the ROSACluster. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -72,13 +72,13 @@ type ROSAClusterList struct { // GetConditions returns the observations of the operational state of the // ROSACluster resource. -func (r *ROSACluster) GetConditions() clusterv1.Conditions { +func (r *ROSACluster) GetConditions() []metav1.Condition { return r.Status.Conditions } // SetConditions sets the underlying service state of the ROSACluster to the -// predescribed clusterv1.Conditions. -func (r *ROSACluster) SetConditions(conditions clusterv1.Conditions) { +// predescribed []metav1.Condition. +func (r *ROSACluster) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/rosamachinepool_types.go b/exp/api/v1beta2/rosamachinepool_types.go index 0dc3af30ed..24d534efd5 100644 --- a/exp/api/v1beta2/rosamachinepool_types.go +++ b/exp/api/v1beta2/rosamachinepool_types.go @@ -22,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // RosaMachinePoolSpec defines the desired state of RosaMachinePool. @@ -205,20 +204,10 @@ type RosaMachinePoolStatus struct { // Replicas is the most recently observed number of replicas. // +optional Replicas int32 `json:"replicas"` + // Conditions defines current service state of the managed machine pool // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` - // FailureMessage will be set in the event that there is a terminal problem - // reconciling the state and will be set to a descriptive error message. - // - // This field should not be set for transitive errors that a controller - // faces that are expected to be fixed automatically over - // time (like service outages), but instead indicate that something is - // fundamentally wrong with the spec or the configuration of - // the controller, and that manual intervention is required. - // - // +optional - FailureMessage *string `json:"failureMessage,omitempty"` + Conditions []metav1.Condition `json:"conditions,omitempty"` // ID is the ID given by ROSA. ID string `json:"id,omitempty"` @@ -253,12 +242,12 @@ type ROSAMachinePoolList struct { } // GetConditions returns the observations of the operational state of the RosaMachinePool resource. -func (r *ROSAMachinePool) GetConditions() clusterv1.Conditions { +func (r *ROSAMachinePool) GetConditions() []metav1.Condition { return r.Status.Conditions } -// SetConditions sets the underlying service state of the RosaMachinePool to the predescribed clusterv1.Conditions. -func (r *ROSAMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the RosaMachinePool to the predescribed []metav1.Condition. 
+func (r *ROSAMachinePool) SetConditions(conditions []metav1.Condition) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go index 6885eb4c64..6a5ce0ba98 100644 --- a/exp/api/v1beta2/zz_generated.deepcopy.go +++ b/exp/api/v1beta2/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -219,6 +219,21 @@ func (in *AWSMachinePool) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachinePoolInitializationStatus) DeepCopyInto(out *AWSMachinePoolInitializationStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachinePoolInitializationStatus. +func (in *AWSMachinePoolInitializationStatus) DeepCopy() *AWSMachinePoolInitializationStatus { + if in == nil { + return nil + } + out := new(AWSMachinePoolInitializationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSMachinePoolInstanceStatus) DeepCopyInto(out *AWSMachinePoolInstanceStatus) { *out = *in @@ -348,9 +363,25 @@ func (in *AWSMachinePoolSpec) DeepCopy() *AWSMachinePoolSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) { *out = *in + if in.ReadyReplicas != nil { + in, out := &in.ReadyReplicas, &out.ReadyReplicas + *out = new(int32) + **out = **in + } + if in.AvailableReplicas != nil { + in, out := &in.AvailableReplicas, &out.AvailableReplicas + *out = new(int32) + **out = **in + } + if in.UpToDateReplicas != nil { + in, out := &in.UpToDateReplicas, &out.UpToDateReplicas + *out = new(int32) + **out = **in + } + out.Initialization = in.Initialization if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -367,16 +398,6 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) { *out = new(string) **out = **in } - if in.FailureReason != nil { - in, out := &in.FailureReason, &out.FailureReason - *out = new(string) - **out = **in - } - if in.FailureMessage != nil { - in, out := &in.FailureMessage, &out.FailureMessage - *out = new(string) - **out = **in - } if in.ASGStatus != nil { in, out := &in.ASGStatus, &out.ASGStatus *out = new(ASGStatus) @@ -604,7 +625,7 @@ func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolSt } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -740,19 +761,9 @@ func (in *FargateProfileSpec) DeepCopy() *FargateProfileSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) { *out = *in - if in.FailureReason != nil { - in, out := &in.FailureReason, &out.FailureReason - *out = new(string) - **out = **in - } - if in.FailureMessage != nil { - in, out := &in.FailureMessage, &out.FailureMessage - *out = new(string) - **out = **in - } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1046,14 +1057,14 @@ func (in *ROSAClusterStatus) DeepCopyInto(out *ROSAClusterStatus) { *out = *in if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() + *out = make([]corev1beta2.FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1273,16 +1284,11 @@ func (in *RosaMachinePoolStatus) DeepCopyInto(out *RosaMachinePoolStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(v1beta1.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.FailureMessage != nil { - in, out := &in.FailureMessage, &out.FailureMessage - *out = new(string) - **out = **in - } if in.AvailableUpgrades != nil { in, out := &in.AvailableUpgrades, &out.AvailableUpgrades *out = make([]string, len(*in)) diff --git a/exp/controlleridentitycreator/suite_test.go b/exp/controlleridentitycreator/suite_test.go index 4cf1b0bb12..a294966d4e 100644 --- a/exp/controlleridentitycreator/suite_test.go +++ b/exp/controlleridentitycreator/suite_test.go @@ -30,7 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to diff --git a/exp/controllers/awsfargatepool_controller.go b/exp/controllers/awsfargatepool_controller.go index b4fbb0f99d..2da8244f83 100644 --- a/exp/controllers/awsfargatepool_controller.go +++ b/exp/controllers/awsfargatepool_controller.go @@ -21,6 +21,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" @@ -35,7 +36,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" @@ -112,13 +113,23 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re } defer func() { - applicableConditions := []clusterv1.ConditionType{ + forConditionTypes := conditions.ForConditionTypes{ expinfrav1.IAMFargateRolesReadyCondition, expinfrav1.EKSFargateProfileReadyCondition, } - conditions.SetSummary(fargateProfileScope.FargateProfile, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) - + summaryOpts := []conditions.SummaryOption{ + forConditionTypes, + } + readyCondition, err := conditions.NewSummaryCondition(fargateProfileScope.FargateProfile, clusterv1.ReadyCondition, summaryOpts...) + if err != nil { + readyCondition = &metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + } + } + conditions.Set(fargateProfileScope.FargateProfile, *readyCondition) + if err := fargateProfileScope.Close(); err != nil && reterr == nil { reterr = err } @@ -126,7 +137,11 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re if !controlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(fargateProfile, clusterv1.ReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(fargateProfile, metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.WaitingForEKSControlPlaneReason, + }) return ctrl.Result{}, nil } diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 0a9fa2b43c..3d8650bfb4 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -22,6 +22,7 @@ import ( "fmt" "time" + "github.com/aws/smithy-go/ptr" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" @@ -52,8 +53,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -180,17 +180,22 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Always close the scope when exiting this function so we can persist any AWSMachine changes. 
 	defer func() {
-		// set Ready condition before AWSMachinePool is patched
-		conditions.SetSummary(machinePoolScope.AWSMachinePool,
-			conditions.WithConditions(
-				expinfrav1.ASGReadyCondition,
-				expinfrav1.LaunchTemplateReadyCondition,
-			),
-			conditions.WithStepCounterIfOnly(
-				expinfrav1.ASGReadyCondition,
-				expinfrav1.LaunchTemplateReadyCondition,
-			),
-		)
+		forConditionTypes := conditions.ForConditionTypes{
+			expinfrav1.ASGReadyCondition,
+			expinfrav1.LaunchTemplateReadyCondition,
+		}
+		summaryOpts := []conditions.SummaryOption{
+			forConditionTypes,
+		}
+
+		readyCondition, err := conditions.NewSummaryCondition(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, summaryOpts...)
+		if err != nil {
+			readyCondition = &metav1.Condition{
+				Type:   clusterv1.ReadyCondition,
+				Status: metav1.ConditionFalse,
+			}
+		}
+		conditions.Set(machinePoolScope.AWSMachinePool, *readyCondition)
 
 		if err := machinePoolScope.Close(); err != nil && reterr == nil {
 			reterr = err
@@ -228,7 +233,7 @@ func (r *AWSMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctr
 		WithOptions(options).
 		For(&expinfrav1.AWSMachinePool{}).
 		Watches(
-			&expclusterv1.MachinePool{},
+			&clusterv1.MachinePool{},
 			handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(expinfrav1.GroupVersion.WithKind("AWSMachinePool"))),
 		).
 		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)).
@@ -277,16 +282,25 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 		}
 	}
 
-	if !machinePoolScope.Cluster.Status.InfrastructureReady {
+	if provisioned := machinePoolScope.Cluster.Status.Initialization.InfrastructureProvisioned; provisioned == nil || !*provisioned {
 		machinePoolScope.Info("Cluster infrastructure is not ready yet")
-		conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
+
+		conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{
+			Type:   expinfrav1.ASGReadyCondition,
+			Status: metav1.ConditionFalse,
+			Reason: infrav1.WaitingForClusterInfrastructureReason,
+		})
 		return ctrl.Result{}, nil
 	}
 
 	// Make sure bootstrap data is available and populated
 	if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
 		machinePoolScope.Info("Bootstrap data secret reference is not yet available")
-		conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{
+			Type:   expinfrav1.ASGReadyCondition,
+			Status: metav1.ConditionFalse,
+			Reason: infrav1.WaitingForBootstrapDataReason,
+		})
 		return ctrl.Result{}, nil
 	}
 
@@ -298,7 +312,12 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 	// Find existing ASG
 	asg, err := r.findASG(machinePoolScope, asgsvc)
 	if err != nil {
-		conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, "%s", err.Error())
+		conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{
+			Type:    expinfrav1.ASGReadyCondition,
+			Status:  metav1.ConditionUnknown,
+			Reason:  expinfrav1.ASGNotFoundReason,
+			Message: err.Error(),
+		})
 		return ctrl.Result{}, err
 	}
 
@@ -343,12 +362,19 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 	}
 
 	// set the LaunchTemplateReady condition
-	conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition)
expinfrav1.LaunchTemplateReadyCondition) - + conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionTrue, + }) if asg == nil { // Create new ASG if err := r.createPool(machinePoolScope, clusterScope); err != nil { - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{ + Type: expinfrav1.ASGReadyCondition, + Status: metav1.ConditionTrue, + Reason: expinfrav1.ASGProvisionFailedReason, + Message: fmt.Sprintf("%s", err), + }) return ctrl.Result{}, err } return ctrl.Result{ @@ -363,14 +389,22 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP } if err := createAWSMachinesIfNotExists(ctx, awsMachineList, machinePoolScope.MachinePool, &machinePoolScope.AWSMachinePool.ObjectMeta, &machinePoolScope.AWSMachinePool.TypeMeta, asg, machinePoolScope.GetLogger(), r.Client, ec2Svc); err != nil { - machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(machinePoolScope, metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.AWSMachineCreationFailed, + Message: fmt.Sprintf("%s", err), + }) return ctrl.Result{}, fmt.Errorf("failed to create awsmachines: %w", err) } if err := deleteOrphanedAWSMachines(ctx, awsMachineList, asg, machinePoolScope.GetLogger(), r.Client); err != nil { - machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(machinePoolScope, metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.AWSMachineDeletionFailed, + Message: fmt.Sprintf("%s", err), + }) return ctrl.Result{}, fmt.Errorf("failed to clean up awsmachines: %w", err) } } @@ -426,9 +460,13 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP machinePoolScope.SetAnnotation("cluster-api-provider-aws", "true") machinePoolScope.AWSMachinePool.Spec.ProviderIDList = providerIDList - machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList)) //#nosec G115 - machinePoolScope.AWSMachinePool.Status.Ready = true - conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition) + machinePoolScope.AWSMachinePool.Status.ReadyReplicas = ptr.Int32(int32(len(providerIDList))) //#nosec G115 + + conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: expinfrav1.ASGReadyCondition, + }) err = machinePoolScope.UpdateInstanceStatuses(ctx, asg.Instances) if err != nil { @@ -473,8 +511,11 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machineP switch asg.Status { case expinfrav1.ASGStatusDeleteInProgress: // ASG is already deleting - machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1.ConditionSeverityWarning, "") + conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: 
@@ -473,8 +511,11 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machineP
	switch asg.Status {
	case expinfrav1.ASGStatusDeleteInProgress:
		// ASG is already deleting
-		machinePoolScope.SetNotReady()
-		conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1.ConditionSeverityWarning, "")
+		conditions.Set(machinePoolScope.AWSMachinePool, metav1.Condition{
+			Type:   clusterv1.ReadyCondition,
+			Status: metav1.ConditionFalse,
+			Reason: expinfrav1.ASGDeletionInProgress,
+		})
		r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "DeletionInProgress", "ASG deletion in progress: %q", asg.Name)
		machinePoolScope.Info("ASG is already deleting", "name", asg.Name)
	default:
@@ -654,7 +695,7 @@ func diffASG(machinePoolScope *scope.MachinePoolScope, existingASG *expinfrav1.A
}

// getOwnerMachinePool returns the MachinePool object owning the current resource.
-func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expclusterv1.MachinePool, error) {
+func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) {
	for _, ref := range obj.OwnerReferences {
		if ref.Kind != "MachinePool" {
			continue
@@ -663,7 +704,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object
		if err != nil {
			return nil, errors.WithStack(err)
		}
-		if gv.Group == expclusterv1.GroupVersion.Group {
+		if gv.Group == clusterv1.GroupVersion.Group {
			return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name)
		}
	}
@@ -671,8 +712,8 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object
}

// getMachinePoolByName finds and return a Machine object using the specified params.
-func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expclusterv1.MachinePool, error) {
-	m := &expclusterv1.MachinePool{}
+func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) {
+	m := &clusterv1.MachinePool{}
	key := client.ObjectKey{Name: name, Namespace: namespace}
	if err := c.Get(ctx, key, m); err != nil {
		return nil, err
@@ -682,14 +723,15 @@ func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name
func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc {
	return func(ctx context.Context, o client.Object) []reconcile.Request {
-		m, ok := o.(*expclusterv1.MachinePool)
+		m, ok := o.(*clusterv1.MachinePool)
		if !ok {
			klog.Errorf("Expected a MachinePool but got a %T", o)
+			return nil
		}

		gk := gvk.GroupKind()
		// Return early if the GroupKind doesn't match what we expect
-		infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind()
+		infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupKind()
		if gk != infraGK {
			return nil
		}
@@ -717,7 +758,7 @@ func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log *log
	var managedControlPlaneScope *scope.ManagedControlPlaneScope
	var err error

-	if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind {
+	if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind {
		controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
		controlPlaneName := client.ObjectKey{
			Namespace: awsMachinePool.Namespace,
diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go
index 59675a287a..785db08928 100644
--- a/exp/controllers/awsmachinepool_controller_test.go
+++ b/exp/controllers/awsmachinepool_controller_test.go
@@ -54,9 +54,8 @@ import (
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expclusterv1
"sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/labels/format" "sigs.k8s.io/cluster-api/util/patch" ) @@ -149,7 +148,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { InfrastructureReady: true, }, }, - MachinePool: &expclusterv1.MachinePool{ + MachinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp", Namespace: "default", @@ -159,7 +158,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachinePool", }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: "test", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ @@ -270,7 +269,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) + expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) }) t.Run("should exit immediately if bootstrap data secret reference isn't available", func(t *testing.T) { g := NewWithT(t) @@ -286,7 +285,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available")) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) + expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) }) }) t.Run("there's a provider ID", func(t *testing.T) { @@ -1036,7 +1035,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) g.Expect(err).To(HaveOccurred()) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.PreLaunchTemplateUpdateCheckCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, expinfrav1.PreLaunchTemplateUpdateCheckFailedReason}}) + expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.PreLaunchTemplateUpdateCheckCondition, metav1.ConditionFalse, clusterv1.ConditionSeverityWarning, expinfrav1.PreLaunchTemplateUpdateCheckFailedReason}}) // Now simulate that no pending instance refresh exists asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil) @@ -1372,7 +1371,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { type conditionAssertion struct { conditionType clusterv1.ConditionType - status corev1.ConditionStatus + status metav1.ConditionStatus severity clusterv1.ConditionSeverity reason string } @@ -1420,8 +1419,8 @@ func TestDiffASG(t *testing.T) { name: "replicas != asg.desiredCapacity", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: 
clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1436,8 +1435,8 @@ func TestDiffASG(t *testing.T) { name: "replicas (nil) != asg.desiredCapacity", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: nil, }, }, @@ -1452,8 +1451,8 @@ func TestDiffASG(t *testing.T) { name: "replicas != asg.desiredCapacity (nil)", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1468,8 +1467,8 @@ func TestDiffASG(t *testing.T) { name: "maxSize != asg.maxSize", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1490,8 +1489,8 @@ func TestDiffASG(t *testing.T) { name: "minSize != asg.minSize", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1514,8 +1513,8 @@ func TestDiffASG(t *testing.T) { name: "capacityRebalance != asg.capacityRebalance", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1540,8 +1539,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy != asg.MixedInstancesPolicy", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1574,8 +1573,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy.InstancesDistribution != asg.MixedInstancesPolicy.InstancesDistribution", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1627,8 +1626,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy.InstancesDistribution unset", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1674,8 +1673,8 @@ func TestDiffASG(t *testing.T) { name: "SuspendProcesses != asg.SuspendProcesses", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1715,8 +1714,8 @@ func TestDiffASG(t *testing.T) { name: "all matches", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1753,13 +1752,13 @@ 
func TestDiffASG(t *testing.T) { name: "externally managed annotation ignores difference between desiredCapacity and replicas", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ + MachinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ clusterv1.ReplicasManagedByAnnotation: "", // empty value counts as true (= externally managed) }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1777,8 +1776,8 @@ func TestDiffASG(t *testing.T) { name: "without externally managed annotation ignores difference between desiredCapacity and replicas", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, diff --git a/exp/controllers/awsmachinepool_machines.go b/exp/controllers/awsmachinepool_machines.go index 24c633df05..db7f7076f5 100644 --- a/exp/controllers/awsmachinepool_machines.go +++ b/exp/controllers/awsmachinepool_machines.go @@ -10,6 +10,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -17,13 +18,11 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/labels/format" ) -func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *expclusterv1.MachinePool, infraMachinePoolMeta *metav1.ObjectMeta, infraMachinePoolType *metav1.TypeMeta, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { +func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *clusterv1.MachinePool, infraMachinePoolMeta *metav1.ObjectMeta, infraMachinePoolType *metav1.TypeMeta, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return errors.New("createAWSMachinesIfNotExists must not be called unless the MachinePoolMachines feature gate is enabled") } @@ -161,7 +160,7 @@ func deleteOrphanedAWSMachines(ctx context.Context, awsMachineList *infrav1.AWSM return nil } -func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { +func getAWSMachines(ctx context.Context, mp *clusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return nil, errors.New("getAWSMachines must not be called unless the MachinePoolMachines feature gate is enabled") } @@ -177,11 +176,12 @@ func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClien return awsMachineList, nil } -func reconcileDeleteAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, client client.Client, l logr.Logger) error { +func reconcileDeleteAWSMachines(ctx context.Context, mp *clusterv1.MachinePool, 
client client.Client, l logr.Logger) error { if !feature.Gates.Enabled(feature.MachinePoolMachines) { + return errors.New("reconcileDeleteAWSMachines must not be called unless the MachinePoolMachines feature gate is enabled") } - + awsMachineList, err := getAWSMachines(ctx, mp, client) if err != nil { return err diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go index b7e918f7aa..1ee06e0b8f 100644 --- a/exp/controllers/awsmanagedmachinepool_controller.go +++ b/exp/controllers/awsmanagedmachinepool_controller.go @@ -43,8 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" @@ -75,7 +74,7 @@ func (r *AWSManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). Watches( @@ -151,7 +150,11 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr if !controlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(awsPool, expinfrav1.EKSNodegroupReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(awsPool, metav1.Condition{ + Type: expinfrav1.EKSNodegroupReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.WaitingForEKSControlPlaneReason, + }) return ctrl.Result{}, nil } @@ -173,13 +176,23 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr } defer func() { - applicableConditions := []clusterv1.ConditionType{ + forConditionTypes := conditions.ForConditionTypes{ expinfrav1.EKSNodegroupReadyCondition, expinfrav1.IAMNodegroupRolesReadyCondition, expinfrav1.LaunchTemplateReadyCondition, } + summaryOpts := []conditions.SummaryOption{ + forConditionTypes, + } - conditions.SetSummary(machinePoolScope.ManagedMachinePool, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) + readyCondition, err := conditions.NewSummaryCondition(machinePoolScope.ManagedMachinePool, clusterv1.ReadyCondition, summaryOpts...) 
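+		// NewSummaryCondition can fail, for example when one of the summarized
+		// condition types has not been set on the object yet; fall back to a
+		// plain Ready=False condition so the patch applied by Close() below
+		// still carries a Ready condition instead of dropping the summary.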
+ if err != nil { + readyCondition = &metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + } + } + conditions.Set(machinePoolScope.ManagedMachinePool, *readyCondition) if err := machinePoolScope.Close(); err != nil && reterr == nil { reterr = err @@ -222,7 +235,11 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( if err := reconSvc.ReconcileLaunchTemplate(ctx, machinePoolScope, machinePoolScope, s3Scope, ec2svc, objectStoreSvc, canUpdateLaunchTemplate, runPostLaunchTemplateUpdateOperation); err != nil { r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err) machinePoolScope.Error(err, "failed to reconcile launch template") - conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "") + conditions.Set(machinePoolScope.ManagedMachinePool, metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateReconcileFailedReason, + }) return err } @@ -236,7 +253,10 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( } // set the LaunchTemplateReady condition - conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition) + conditions.Set(machinePoolScope.ManagedMachinePool, metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionTrue, + }) } if err := ekssvc.ReconcilePool(ctx); err != nil { @@ -328,7 +348,7 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema. return nil } - managedPoolForClusterList := expclusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index 9aebd92622..1c16af04c7 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -37,11 +37,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -71,7 +70,7 @@ func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ct WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). 
Watches( @@ -361,7 +360,7 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope condition := &clusterv1.Condition{ Type: expinfrav1.RosaMachinePoolUpgradingCondition, - Status: corev1.ConditionTrue, + Status: metav1.ConditionTrue, Reason: string(scheduledUpgrade.State().Value()), Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()), } @@ -459,7 +458,7 @@ func validateMachinePoolSpec(machinePoolScope *scope.RosaMachinePoolScope) (*str return nil, nil } -func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec expclusterv1.MachinePoolSpec, controlPlaneChannelGroup rosacontrolplanev1.ChannelGroupType) *cmv1.NodePoolBuilder { +func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec clusterv1.MachinePoolSpec, controlPlaneChannelGroup rosacontrolplanev1.ChannelGroupType) *cmv1.NodePoolBuilder { npBuilder := cmv1.NewNodePool().ID(rosaMachinePoolSpec.NodePoolName). Labels(rosaMachinePoolSpec.Labels). AutoRepair(rosaMachinePoolSpec.AutoRepair) @@ -602,7 +601,7 @@ func rosaControlPlaneToRosaMachinePoolMapFunc(c client.Client, gvk schema.GroupV return nil } - managedPoolForClusterList := expclusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go index 553cc38922..8fa587ddd0 100644 --- a/exp/controllers/rosamachinepool_controller_test.go +++ b/exp/controllers/rosamachinepool_controller_test.go @@ -30,8 +30,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -72,7 +71,7 @@ func TestNodePoolToRosaMachinePoolSpec(t *testing.T) { }, } - machinePoolSpec := expclusterv1.MachinePoolSpec{ + machinePoolSpec := clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](2), } @@ -188,8 +187,8 @@ func TestRosaMachinePoolReconcile(t *testing.T) { } } - ownerMachinePool := func(i int) *expclusterv1.MachinePool { - return &expclusterv1.MachinePool{ + ownerMachinePool := func(i int) *clusterv1.MachinePool { + return &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("machinepool-%v", i), Namespace: ns.Name, @@ -200,7 +199,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: fmt.Sprintf("owner-cluster-%v", i), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ @@ -210,7 +209,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Name: rosaMachinePool(i).Name, Namespace: ns.Namespace, Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, @@ -222,7 +221,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { name string newROSAMachinePool *expinfrav1.ROSAMachinePool oldROSAMachinePool *expinfrav1.ROSAMachinePool - machinePool *expclusterv1.MachinePool + machinePool *clusterv1.MachinePool expect func(m 
*mocks.MockOCMClientMockRecorder) result reconcile.Result }{ @@ -350,7 +349,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { }, { name: "Create nodepool, replicas are set in MachinePool", - machinePool: &expclusterv1.MachinePool{ + machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: ownerMachinePool(3).Name, Namespace: ns.Name, @@ -361,7 +360,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: ownerCluster(3).Name, Replicas: ptr.To[int32](2), Template: clusterv1.MachineTemplateSpec{ @@ -372,7 +371,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Name: rosaMachinePool(3).Name, Namespace: ns.Namespace, Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, @@ -412,7 +411,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { }, { name: "Update nodepool, replicas are updated from MachinePool", - machinePool: &expclusterv1.MachinePool{ + machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: ownerMachinePool(4).Name, Namespace: ns.Name, @@ -423,7 +422,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: ownerCluster(4).Name, Replicas: ptr.To[int32](2), Template: clusterv1.MachineTemplateSpec{ @@ -434,7 +433,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Name: rosaMachinePool(4).Name, Namespace: ns.Namespace, Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, @@ -529,7 +528,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { test.oldROSAMachinePool.Status.Conditions = clusterv1.Conditions{ { Type: "Paused", - Status: corev1.ConditionFalse, + Status: metav1.ConditionFalse, Reason: "NotPaused", }, } diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 9283f003e9..3bc3642c44 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -31,8 +31,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -53,7 +52,6 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(corev1.AddToScheme(scheme.Scheme)) utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ diff --git a/exp/instancestate/suite_test.go b/exp/instancestate/suite_test.go index 2e669f7bfd..28892be7ed 100644 --- a/exp/instancestate/suite_test.go +++ b/exp/instancestate/suite_test.go @@ -30,8 +30,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -55,7 +54,6 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/go.mod b/go.mod index 1ffc472259..37238efa38 100644 --- a/go.mod +++ b/go.mod @@ -31,65 +31,71 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/coreos/ignition v0.35.0 github.com/coreos/ignition/v2 v2.16.2 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/gofrs/flock v0.8.1 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f github.com/google/gofuzz v1.2.0 - github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.3 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.38.0 github.com/openshift-online/ocm-common v0.0.29 github.com/openshift-online/ocm-sdk-go v0.1.465 github.com/openshift/rosa v1.2.55 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.22.0 github.com/sergi/go-diff v1.3.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 + github.com/spf13/pflag v1.0.7 github.com/zgalor/weberr v0.8.2 - golang.org/x/crypto v0.36.0 - golang.org/x/net v0.38.0 - golang.org/x/text v0.23.0 + golang.org/x/crypto v0.40.0 + golang.org/x/net v0.42.0 + golang.org/x/text v0.27.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/apiserver v0.32.3 + k8s.io/api v0.33.3 + k8s.io/apiextensions-apiserver v0.33.3 + k8s.io/apimachinery v0.33.3 + k8s.io/apiserver v0.33.3 k8s.io/cli-runtime v0.32.3 - k8s.io/client-go v0.32.3 - k8s.io/component-base v0.32.3 + k8s.io/client-go v0.33.3 + k8s.io/component-base v0.33.3 k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.32.3 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/aws-iam-authenticator v0.6.13 - sigs.k8s.io/cluster-api v1.10.2 - sigs.k8s.io/cluster-api/test v1.10.2 - sigs.k8s.io/controller-runtime v0.20.4 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/cluster-api v1.11.1 + sigs.k8s.io/cluster-api/test v1.11.1 + sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/yaml v1.6.0 ) -require 
github.com/aws/aws-sdk-go v1.55.7 // indirect +require ( + github.com/aws/aws-sdk-go v1.55.7 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect +) require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.18.0 // indirect - dario.cat/mergo v1.0.1 // indirect + cel.dev/expr v0.19.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect - github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect @@ -109,7 +115,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect + github.com/blang/semver/v4 v4.0.0 github.com/briandowns/spinner v1.11.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -122,7 +128,7 @@ require ( github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.0.2+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect @@ -146,22 +152,19 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/glog v1.2.5 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect - github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 
// indirect github.com/gorilla/css v1.0.1 // indirect - github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -173,9 +176,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/microcosm-cc/bluemonday v1.0.26 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect @@ -191,56 +192,54 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 // indirect - github.com/shopspring/decimal v1.4.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/viper v1.20.0 // indirect + github.com/spf13/viper v1.20.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/valyala/fastjson v1.6.4 // indirect github.com/vincent-petithory/dataurl v1.0.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zalando/go-keyring v0.2.3 // indirect gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // 
indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/time v0.8.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/term v0.33.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.34.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect - google.golang.org/grpc v1.67.3 // indirect - google.golang.org/protobuf v1.36.5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.71.3 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cluster-bootstrap v0.32.3 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + k8s.io/cluster-bootstrap v0.33.3 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kind v0.27.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/kind v0.30.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index 61860ee4e4..3c670b0f01 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= @@ -20,6 +20,7 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= @@ -40,8 +41,6 @@ github.com/apparentlymart/go-cidr v1.1.0 
h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4t github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/amazon-vpc-cni-k8s v1.15.5 h1:/mqTXB4HoGYg4CiU4Gco9iEvZ+V/309Na4HEMPgok5Q= github.com/aws/amazon-vpc-cni-k8s v1.15.5/go.mod h1:jV4wNtmgT2Ra1/oZU99DPOFsCUKnf0mYfIyzDyAUVAY= github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y= @@ -145,12 +144,16 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY= -github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= +github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM= +github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb h1:rmqyI19j3Z/74bIRhuC59RB442rXUazKNueVpfJPxg4= github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= @@ -178,16 +181,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8= -github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 
h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= @@ -213,8 +214,8 @@ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -266,10 +267,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -290,26 +291,18 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/goterm 
v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4= github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= -github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= @@ -346,8 +339,6 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod 
h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= -github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -356,6 +347,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -364,6 +357,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -392,6 +387,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -418,13 +417,13 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.3 
h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= -github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= +github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= @@ -444,13 +443,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -458,8 +459,8 @@ github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= 
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= @@ -477,8 +478,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= @@ -487,16 +486,18 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= -github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -510,10 +511,6 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= -github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= -github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -521,8 +518,6 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -533,42 +528,30 @@ github.com/zgalor/weberr v0.8.2/go.mod h1:cqK89mj84q3PRgqQXQFWJDzCorOd8xOtov/ulO github.com/ziutek/telnet v0.0.0-20180329124119-c3b780dc415b/go.mod h1:IZpXDfkJ6tWD3PhBK5YzgQT+xJWh7OsdwiG8hA2MkO4= gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a h1:DxppxFKRqJ8WD6oJ3+ZXKDY0iMONQDl5UTg2aTyHh8k= gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0= -go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U= -go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM= -go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ= -go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 
h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk 
v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= @@ -577,13 +560,17 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= @@ -606,19 +593,19 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.42.0 
h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -642,18 +629,18 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.27.0 
h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -664,8 +651,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -676,18 +663,16 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc 
v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= -google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= +google.golang.org/grpc v1.71.3 h1:iEhneYTxOruJyZAxdAv8Y0iRZvsc5M6KoW7UA0/7jn0= +google.golang.org/grpc v1.71.3/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -696,8 +681,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -708,8 +693,6 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWM gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= -gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -726,45 +709,49 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod 
h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= -k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= +k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss= k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= -k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s= -k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344= -k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= -k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= +k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI= +k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds= +k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= +k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/aws-iam-authenticator v0.6.13 h1:QSQcAkpt/hF97Ogyoz6sj3WD2twTd2cmxFb4e6Rs9gA= sigs.k8s.io/aws-iam-authenticator v0.6.13/go.mod 
h1:CnvFyzR/xeLHmUY/BD0qW6q0wp6KIwXmFp4eTfrHdP8= -sigs.k8s.io/cluster-api v1.10.2 h1:xfvtNu4Fy/41grL0ryH5xSKQjpJEWdO8HiV2lPCCozQ= -sigs.k8s.io/cluster-api v1.10.2/go.mod h1:/b9Un5Imprib6S7ZOcJitC2ep/5wN72b0pXpMQFfbTw= -sigs.k8s.io/cluster-api/test v1.10.2 h1:y6vSdS9FSAi/DNoFE2fZo2fed0m1cgW+ueBazk1g4i8= -sigs.k8s.io/cluster-api/test v1.10.2/go.mod h1:KLeRjNtQS8k5jIPvQF0QxOti/ATu5euwSusb6iFBga8= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/cluster-api v1.11.1 h1:7CyGCTxv1p3Y2kRe1ljTj/w4TcdIdWNj0CTBc4i1aBo= +sigs.k8s.io/cluster-api v1.11.1/go.mod h1:zyrjgJ5RbXhwKcAdUlGPNK5YOHpcmxXvur+5I8lkMUQ= +sigs.k8s.io/cluster-api/test v1.11.1 h1:p9tT2HupKHW1URQDsZ3QNdEC/YPc8nrkiV6RCtNgi5k= +sigs.k8s.io/cluster-api/test v1.11.1/go.mod h1:COviHWIKTcip0VADeIh8Rm5bjqzyZ1LuzKBW1EqjJRc= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= -sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= +sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/main.go b/main.go index 8aac35b373..c23da1b77e 100644 --- a/main.go +++ b/main.go @@ -65,8 +65,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/version" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/flags" ) @@ -80,7 +79,6 @@ func init() { _ = eksbootstrapv1beta1.AddToScheme(scheme) _ = cgscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) _ = ekscontrolplanev1.AddToScheme(scheme) _ = ekscontrolplanev1beta1.AddToScheme(scheme) _ = rosacontrolplanev1.AddToScheme(scheme) diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go index 82c671e2c1..cacaea2db7 100644 --- a/pkg/cloud/interfaces.go +++ b/pkg/cloud/interfaces.go @@ -27,7 
+27,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/conditions"
 )
 
@@ -46,6 +46,7 @@ type ScopeUsage interface {
 // ClusterObject represents a AWS cluster object.
 type ClusterObject interface {
 	conditions.Setter
+	client.Object
 }
 
 // ClusterScoper is the interface for a cluster scope.
@@ -84,7 +85,7 @@ type ClusterScoper interface {
 	// AdditionalTags returns any tags that you would like to attach to AWS resources. The returned value will never be nil.
 	AdditionalTags() infrav1.Tags
 	// SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input.
-	SetFailureDomain(id string, spec clusterv1.FailureDomainSpec)
+	SetFailureDomain(id string, spec []clusterv1.FailureDomain)
 	// PatchObject persists the cluster configuration and status.
 	PatchObject() error
 	// Close closes the current scope persisting the cluster configuration and status.
diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go
index 730b977578..74fdd3b640 100644
--- a/pkg/cloud/scope/cluster.go
+++ b/pkg/cloud/scope/cluster.go
@@ -23,6 +23,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -32,7 +33,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
@@ -250,7 +251,7 @@ func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption {
 func (s *ClusterScope) PatchObject() error {
 	// Always update the readyCondition by summarizing the state of other conditions.
-	// A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process).
-	applicableConditions := []clusterv1.ConditionType{
+	// The Ready condition is set from a summary of the applicable conditions below.
+	forConditionTypes := conditions.ForConditionTypes{
 		infrav1.VpcReadyCondition,
 		infrav1.SubnetsReadyCondition,
 		infrav1.ClusterSecurityGroupsReadyCondition,
@@ -258,7 +259,7 @@
 	}
 
 	if s.VPC().IsManaged(s.Name()) {
-		applicableConditions = append(applicableConditions,
+		forConditionTypes = append(forConditionTypes,
 			infrav1.InternetGatewayReadyCondition,
 			infrav1.NatGatewaysReadyCondition,
 			infrav1.RouteTablesReadyCondition,
@@ -266,23 +267,44 @@
 		)
 
 		if s.AWSCluster.Spec.Bastion.Enabled {
-			applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition)
+			forConditionTypes = append(forConditionTypes, infrav1.BastionHostReadyCondition)
 		}
 
 		if s.VPC().IsIPv6Enabled() {
-			applicableConditions = append(applicableConditions, infrav1.EgressOnlyInternetGatewayReadyCondition)
+			forConditionTypes = append(forConditionTypes, infrav1.EgressOnlyInternetGatewayReadyCondition)
 		}
 	}
 
-	conditions.SetSummary(s.AWSCluster,
-		conditions.WithConditions(applicableConditions...),
-		conditions.WithStepCounterIf(s.AWSCluster.ObjectMeta.DeletionTimestamp.IsZero()),
-		conditions.WithStepCounter(),
-	)
+	summaryOpts := []conditions.SummaryOption{
+		forConditionTypes,
+		//// Instruct summary to consider Deleting condition with negative polarity.
+		//conditions.NegativePolarityConditionTypes{},
+		//// Using a custom merge strategy to override reasons applied during merge and to ignore some
+		//// info message so the available condition is less noisy.
+		//conditions.CustomMergeStrategy{
+		//	MergeStrategy: clusterConditionCustomMergeStrategy{
+		//		cluster: cluster,
+		//		// Instruct merge to consider Deleting condition with negative polarity,
+		//		negativePolarityConditionTypes: negativePolarityConditionTypes,
+		//	},
+		//},
+	}
+
+	availableCondition, err := conditions.NewSummaryCondition(s.AWSCluster, clusterv1.ReadyCondition, summaryOpts...)
+	if err != nil {
+		availableCondition = &metav1.Condition{
+			Type:    clusterv1.ReadyCondition,
+			Status:  metav1.ConditionFalse,
+			Reason:  clusterv1.ClusterAvailableInternalErrorReason,
+			Message: "Please check controller logs for errors",
+		}
+	}
+
+	conditions.Set(s.AWSCluster, *availableCondition)
 
 	return s.patchHelper.Patch(
 		context.TODO(),
 		s.AWSCluster,
-		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+		patch.WithOwnedConditions{Conditions: []string{
 			clusterv1.ReadyCondition,
 			infrav1.VpcReadyCondition,
 			infrav1.SubnetsReadyCondition,
@@ -315,18 +335,18 @@ func (s *ClusterScope) AdditionalTags() infrav1.Tags {
 
 // APIServerPort returns the APIServerPort to use when creating the load balancer.
 func (s *ClusterScope) APIServerPort() int32 {
-	if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil {
-		return *s.Cluster.Spec.ClusterNetwork.APIServerPort
+	if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 {
+		return s.Cluster.Spec.ClusterNetwork.APIServerPort
 	}
 	return infrav1.DefaultAPIServerPort
 }
 
 // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input.
-func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) {
+func (s *ClusterScope) SetFailureDomain(id string, spec []clusterv1.FailureDomain) {
 	if s.AWSCluster.Status.FailureDomains == nil {
-		s.AWSCluster.Status.FailureDomains = make(clusterv1.FailureDomains)
+		s.AWSCluster.Status.FailureDomains = make([]clusterv1.FailureDomain, len(spec))
 	}
-	s.AWSCluster.Status.FailureDomains[id] = spec
+	s.AWSCluster.Status.FailureDomains = spec
 }
 
 // SetNatGatewaysIPs sets the Nat Gateways Public IPs.
diff --git a/pkg/cloud/scope/elb.go b/pkg/cloud/scope/elb.go
index 3d588f665b..8d092a92ee 100644
--- a/pkg/cloud/scope/elb.go
+++ b/pkg/cloud/scope/elb.go
@@ -19,7 +19,7 @@ package scope
 import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 // ELBScope is a scope for use with the ELB reconciling service.
diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go
index 6e0fe0e1ef..6577a33a29 100644
--- a/pkg/cloud/scope/fargate.go
+++ b/pkg/cloud/scope/fargate.go
@@ -18,9 +18,11 @@ package scope
 import (
 	"context"
+	"fmt"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -31,7 +33,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
@@ -168,18 +170,13 @@ func (s *FargateProfileScope) Partition() string {
-// IAMReadyFalse marks the ready condition false using warning if error isn't
-// empty.
+// IAMReadyFalse marks the IAM role ready condition false with the given
+// reason and error message.
 func (s *FargateProfileScope) IAMReadyFalse(reason string, err string) error {
-	severity := clusterv1.ConditionSeverityWarning
-	if err == "" {
-		severity = clusterv1.ConditionSeverityInfo
-	}
-	conditions.MarkFalse(
-		s.FargateProfile,
-		expinfrav1.IAMFargateRolesReadyCondition,
-		reason,
-		severity,
-		"%s",
-		err,
-	)
+	conditions.Set(s.FargateProfile, metav1.Condition{
+		Type:    expinfrav1.IAMFargateRolesReadyCondition,
+		Status:  metav1.ConditionFalse,
+		Reason:  reason,
+		Message: fmt.Sprintf("%s", err),
+	})
+
 	if err := s.PatchObject(); err != nil {
 		return errors.Wrap(err, "failed to mark role not ready")
 	}
@@ -191,7 +187,7 @@ func (s *FargateProfileScope) PatchObject() error {
 	return s.patchHelper.Patch(
 		context.TODO(),
 		s.FargateProfile,
-		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+		patch.WithOwnedConditions{Conditions: []string{
 			expinfrav1.EKSFargateProfileReadyCondition,
 			expinfrav1.EKSFargateCreatingCondition,
 			expinfrav1.EKSFargateDeletingCondition,
diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go
index 34e84e7ff7..30f28b0cb4 100644
--- a/pkg/cloud/scope/launchtemplate.go
+++ b/pkg/cloud/scope/launchtemplate.go
@@ -24,13 +24,13 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/conditions"
 )
 
 // LaunchTemplateScope defines a scope defined around a launch template.
 type LaunchTemplateScope interface {
-	GetMachinePool() *expclusterv1.MachinePool
+	GetMachinePool() *clusterv1.MachinePool
 	GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate
 	LaunchTemplateName() string
 	GetLaunchTemplateIDStatus() string
diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go
index 243bd40242..a1b25cf3f5 100644
--- a/pkg/cloud/scope/machine.go
+++ b/pkg/cloud/scope/machine.go
@@ -19,20 +19,23 @@ package scope
 import (
 	"context"
 	"encoding/base64"
+	"slices"
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
+	clusterapiv1beta1util "sigs.k8s.io/cluster-api-provider-aws/v2/util/clusterapiv1beta1"
+	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -183,16 +186,6 @@ func (m *MachineScope) SetNotReady() {
 	m.AWSMachine.Status.Ready = false
 }
 
-// SetFailureMessage sets the AWSMachine status failure message.
-func (m *MachineScope) SetFailureMessage(v error) {
-	m.AWSMachine.Status.FailureMessage = ptr.To[string](v.Error())
-}
-
-// SetFailureReason sets the AWSMachine status failure reason.
-func (m *MachineScope) SetFailureReason(v string) {
-	m.AWSMachine.Status.FailureReason = &v
-}
-
 // SetAnnotation sets a key value annotation on the AWSMachine.
 func (m *MachineScope) SetAnnotation(key, value string) {
 	if m.AWSMachine.Annotations == nil {
@@ -302,25 +295,48 @@ func (m *MachineScope) GetRawBootstrapDataWithFormat() ([]byte, string, error) {
 func (m *MachineScope) PatchObject() error {
 	// Always update the readyCondition by summarizing the state of other conditions.
-	// A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process).
-	applicableConditions := []clusterv1.ConditionType{
+	// The Ready condition is set from a summary of the applicable conditions below.
+	forConditionTypes := conditions.ForConditionTypes{
 		infrav1.InstanceReadyCondition,
 		infrav1.SecurityGroupsReadyCondition,
 	}
 
 	if m.IsControlPlane() {
-		applicableConditions = append(applicableConditions, infrav1.ELBAttachedCondition)
+		forConditionTypes = append(forConditionTypes, infrav1.ELBAttachedCondition)
+	}
+
+	// TODO(@toby-archer-tr): Review Summary Options for correctness.
+
+	summaryOpts := []conditions.SummaryOption{
+		forConditionTypes,
+		//// Instruct summary to consider Deleting condition with negative polarity.
+		//conditions.NegativePolarityConditionTypes{},
+		//// Using a custom merge strategy to override reasons applied during merge and to ignore some
+		//// info message so the available condition is less noisy.
+		//conditions.CustomMergeStrategy{
+		//	MergeStrategy: clusterConditionCustomMergeStrategy{
+		//		cluster: cluster,
+		//		// Instruct merge to consider Deleting condition with negative polarity,
+		//		negativePolarityConditionTypes: negativePolarityConditionTypes,
+		//	},
+		//},
+	}
+
+	availableCondition, err := conditions.NewSummaryCondition(m.AWSMachine, clusterv1.MachineReadyCondition, summaryOpts...)
+	if err != nil {
+		availableCondition = &metav1.Condition{
+			Type:    clusterv1.MachineReadyCondition,
+			Status:  metav1.ConditionUnknown,
+			Reason:  clusterv1.ClusterAvailableInternalErrorReason,
+			Message: "Please check controller logs for errors",
+		}
 	}
 
-	conditions.SetSummary(m.AWSMachine,
-		conditions.WithConditions(applicableConditions...),
-		conditions.WithStepCounterIf(m.AWSMachine.ObjectMeta.DeletionTimestamp.IsZero()),
-		conditions.WithStepCounter(),
-	)
+	conditions.Set(m.AWSMachine, *availableCondition)
 
 	return m.patchHelper.Patch(
 		context.TODO(),
 		m.AWSMachine,
-		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+		patch.WithOwnedConditions{Conditions: []string{
 			clusterv1.ReadyCondition,
 			infrav1.InstanceReadyCondition,
 			infrav1.SecurityGroupsReadyCondition,
@@ -348,7 +364,9 @@ func (m *MachineScope) AdditionalTags() infrav1.Tags {
 
 // HasFailed returns the failure state of the machine scope.
 func (m *MachineScope) HasFailed() bool {
-	return m.AWSMachine.Status.FailureReason != nil || m.AWSMachine.Status.FailureMessage != nil
+	return slices.ContainsFunc(m.AWSMachine.Status.Conditions, func(condition metav1.Condition) bool {
+		return condition.Status == metav1.StatusFailure
+	})
 }
 
 // InstanceIsRunning returns the instance state of the machine scope.
@@ -399,7 +417,7 @@ func (m *MachineScope) IsControlPlaneExternallyManaged() bool {
 		m.Error(err, "failed to get unstructured control plane")
 		return false
 	}
-	return util.IsExternalManagedControlPlane(u)
+	return clusterapiv1beta1util.IsExternalManagedControlPlane(u)
}
 
 // IsExternallyManaged checks if the machine is externally managed.
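The PatchObject rewrite above is the core pattern of this migration: compute a metav1.Condition summary with NewSummaryCondition, fall back to an explicit condition on error, and write it with conditions.Set. Distilled as a self-contained sketch (illustrative only, not part of the patch; setReadySummary is a hypothetical helper and the "InternalError" reason string is a stand-in, not a confirmed v1beta2 constant):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// setReadySummary recomputes the aggregate Ready condition from the given
// per-subsystem condition types and writes it back onto obj. On error (for
// example, when none of the requested conditions exist yet) it falls back to
// an explicit placeholder so Ready is never left unset.
func setReadySummary(obj conditions.Setter, applicable conditions.ForConditionTypes) {
	summary, err := conditions.NewSummaryCondition(obj, clusterv1.ReadyCondition, applicable)
	if err != nil {
		summary = &metav1.Condition{
			Type:    clusterv1.ReadyCondition,
			Status:  metav1.ConditionUnknown,
			Reason:  "InternalError", // stand-in reason; metav1.Condition requires a non-empty Reason
			Message: "Please check controller logs for errors",
		}
	}
	conditions.Set(obj, *summary)
}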
diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index f34790d061..ebdd893369 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func setupScheme() (*runtime.Scheme, error) { diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go index f9e7fd1225..dc5c6d65b9 100644 --- a/pkg/cloud/scope/machinepool.go +++ b/pkg/cloud/scope/machinepool.go @@ -19,6 +19,7 @@ package scope import ( "context" "fmt" + "slices" "strings" "github.com/pkg/errors" @@ -27,16 +28,14 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" @@ -50,7 +49,7 @@ type MachinePoolScope struct { capiMachinePoolPatchHelper *patch.Helper Cluster *clusterv1.Cluster - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool InfraCluster EC2Scope AWSMachinePool *expinfrav1.AWSMachinePool } @@ -61,7 +60,7 @@ type MachinePoolScopeParams struct { Logger *logger.Logger Cluster *clusterv1.Cluster - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool InfraCluster EC2Scope AWSMachinePool *expinfrav1.AWSMachinePool } @@ -175,7 +174,7 @@ func (m *MachinePoolScope) PatchObject() error { return m.patchHelper.Patch( context.TODO(), m.AWSMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []string{ expinfrav1.ASGReadyCondition, expinfrav1.LaunchTemplateReadyCondition, }}) @@ -202,24 +201,11 @@ func (m *MachinePoolScope) SetAnnotation(key, value string) { m.AWSMachinePool.Annotations[key] = value } -// SetFailureMessage sets the AWSMachine status failure message. -func (m *MachinePoolScope) SetFailureMessage(v error) { - m.AWSMachinePool.Status.FailureMessage = ptr.To[string](v.Error()) -} - -// SetFailureReason sets the AWSMachine status failure reason. -func (m *MachinePoolScope) SetFailureReason(v string) { - m.AWSMachinePool.Status.FailureReason = &v -} - // HasFailed returns true when the AWSMachinePool's Failure reason or Failure message is populated. func (m *MachinePoolScope) HasFailed() bool { - return m.AWSMachinePool.Status.FailureReason != nil || m.AWSMachinePool.Status.FailureMessage != nil -} - -// SetNotReady sets the AWSMachinePool Ready Status to false. -func (m *MachinePoolScope) SetNotReady() { - m.AWSMachinePool.Status.Ready = false + return slices.ContainsFunc(m.AWSMachinePool.Status.Conditions, func(condition metav1.Condition) bool { + return condition.Status == metav1.StatusFailure + }) } // GetASGStatus returns the AWSMachinePool instance state from the status. 
@@ -380,7 +366,7 @@ func (m *MachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate { } // GetMachinePool returns the machine pool object. -func (m *MachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { +func (m *MachinePoolScope) GetMachinePool() *clusterv1.MachinePool { return m.MachinePool } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index be0bc76864..59b8fad801 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -38,7 +38,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/util/patch" ) @@ -268,7 +269,7 @@ func (s *ManagedControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []string{ infrav1.VpcReadyCondition, infrav1.SubnetsReadyCondition, infrav1.ClusterSecurityGroupsReadyCondition, @@ -305,11 +306,11 @@ func (s *ManagedControlPlaneScope) APIServerPort() int32 { } // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. -func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { +func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec []clusterv1.FailureDomain) { if s.ControlPlane.Status.FailureDomains == nil { - s.ControlPlane.Status.FailureDomains = make(clusterv1.FailureDomains) + s.ControlPlane.Status.FailureDomains = make([]clusterv1.FailureDomain, len(spec)) } - s.ControlPlane.Status.FailureDomains[id] = spec + s.ControlPlane.Status.FailureDomains = spec } // InfraCluster returns the AWS infrastructure cluster or control plane object. @@ -446,16 +447,12 @@ func (s *ManagedControlPlaneScope) OIDCIdentityProviderConfig() *ekscontrolplane } // ServiceCidrs returns the CIDR blocks used for services. -func (s *ManagedControlPlaneScope) ServiceCidrs() *clusterv1.NetworkRanges { - if s.Cluster.Spec.ClusterNetwork != nil { - if s.Cluster.Spec.ClusterNetwork.Services != nil { - if len(s.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { - return s.Cluster.Spec.ClusterNetwork.Services - } - } +func (s *ManagedControlPlaneScope) ServiceCidrs() clusterv1.NetworkRanges { + if len(s.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { + return s.Cluster.Spec.ClusterNetwork.Services } - return nil + return clusterv1.NetworkRanges{} } // ControlPlaneLoadBalancer returns the AWSLoadBalancerSpec. 
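The SetFailureDomain rewrites here and in cluster.go swap the v1beta1 clusterv1.FailureDomains map for the v1beta2 []clusterv1.FailureDomain slice, which currently leaves the id parameter unused. One way the id could still be honoured, sketched under the assumption that the v1beta2 FailureDomain type identifies entries by a Name field (upsertFailureDomain is a hypothetical helper, not part of the patch):

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// upsertFailureDomain replaces the failure domain whose Name matches id, or
// appends a new entry, preserving the map-like "set by key" semantics of the
// old v1beta1 API on top of the v1beta2 slice representation.
func upsertFailureDomain(domains []clusterv1.FailureDomain, id string, fd clusterv1.FailureDomain) []clusterv1.FailureDomain {
	fd.Name = id // assumption: Name is the unique key within the slice
	for i := range domains {
		if domains[i].Name == id {
			domains[i] = fd
			return domains
		}
	}
	return append(domains, fd)
}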
diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go
index 7ef4663a24..6bfd5ad311 100644
--- a/pkg/cloud/scope/managednodegroup.go
+++ b/pkg/cloud/scope/managednodegroup.go
@@ -22,12 +22,14 @@ import (
 	"time"
 
 	awsv2 "github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go/ptr"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
@@ -37,8 +39,6 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
@@ -50,7 +50,7 @@ type ManagedMachinePoolScopeParams struct {
 	Cluster                   *clusterv1.Cluster
 	ControlPlane              *ekscontrolplanev1.AWSManagedControlPlane
 	ManagedMachinePool        *expinfrav1.AWSManagedMachinePool
-	MachinePool               *expclusterv1.MachinePool
+	MachinePool               *clusterv1.MachinePool
 	ControllerName            string
 	Session                   awsv2.Config
 	MaxWaitActiveUpdateDelete time.Duration
@@ -131,7 +131,7 @@ type ManagedMachinePoolScope struct {
 	Cluster                   *clusterv1.Cluster
 	ControlPlane              *ekscontrolplanev1.AWSManagedControlPlane
 	ManagedMachinePool        *expinfrav1.AWSManagedMachinePool
-	MachinePool               *expclusterv1.MachinePool
+	MachinePool               *clusterv1.MachinePool
 	EC2Scope                  EC2Scope
 	MaxWaitActiveUpdateDelete time.Duration
 
@@ -201,7 +201,7 @@ func (s *ManagedMachinePoolScope) RoleName() string {
 
 // Version returns the nodegroup Kubernetes version.
 func (s *ManagedMachinePoolScope) Version() *string {
-	return s.MachinePool.Spec.Template.Spec.Version
+	return ptr.String(s.MachinePool.Spec.Template.Spec.Version)
 }
 
 // ControlPlaneSubnets returns the control plane subnets.
@@ -228,18 +228,12 @@ func (s *ManagedMachinePoolScope) SubnetIDs() ([]string, error) {
-// NodegroupReadyFalse marks the ready condition false using warning if error isn't
-// empty.
+// NodegroupReadyFalse marks the nodegroup ready condition false with the
+// given reason and error message.
 func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) error {
-	severity := clusterv1.ConditionSeverityWarning
-	if err == "" {
-		severity = clusterv1.ConditionSeverityInfo
-	}
-	conditions.MarkFalse(
-		s.ManagedMachinePool,
-		expinfrav1.EKSNodegroupReadyCondition,
-		reason,
-		severity,
-		"%s",
-		err,
-	)
+	conditions.Set(s.ManagedMachinePool, metav1.Condition{
+		Type:    expinfrav1.EKSNodegroupReadyCondition,
+		Status:  metav1.ConditionFalse,
+		Reason:  reason,
+		Message: fmt.Sprintf("%s", err),
+	})
 	if err := s.PatchObject(); err != nil {
 		return errors.Wrap(err, "failed to mark nodegroup not ready")
 	}
@@ -249,18 +243,12 @@ func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string)
-// IAMReadyFalse marks the ready condition false using warning if error isn't
-// empty.
+// IAMReadyFalse marks the IAM role ready condition false with the given
+// reason and error message.
func (s *ManagedMachinePoolScope) IAMReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning - if err == "" { - severity = clusterv1.ConditionSeverityInfo - } - conditions.MarkFalse( - s.ManagedMachinePool, - expinfrav1.IAMNodegroupRolesReadyCondition, - reason, - severity, - "%s", - err, - ) + conditions.Set(s.ManagedMachinePool, metav1.Condition{ + Type: expinfrav1.IAMNodegroupRolesReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: fmt.Sprintf("%s", err), + }) if err := s.PatchObject(); err != nil { return errors.Wrap(err, "failed to mark nodegroup role not ready") } @@ -272,7 +260,7 @@ func (s *ManagedMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ManagedMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []string{ expinfrav1.EKSNodegroupReadyCondition, expinfrav1.IAMNodegroupRolesReadyCondition, }}) @@ -410,7 +398,7 @@ func (s *ManagedMachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTempl } // GetMachinePool returns the machine pool. -func (s *ManagedMachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { +func (s *ManagedMachinePoolScope) GetMachinePool() *clusterv1.MachinePool { return s.MachinePool } diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index 4aac52bd01..44013062d3 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -35,7 +35,7 @@ import ( stsservice "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -207,7 +207,7 @@ func (s *ROSAControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []string{ rosacontrolplanev1.ROSAControlPlaneReadyCondition, rosacontrolplanev1.ROSAControlPlaneValidCondition, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index 5c53635b5b..ddd87fa484 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -18,10 +18,13 @@ package scope import ( "context" + "fmt" awsv2 "github.com/aws/aws-sdk-go-v2/aws" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -30,8 +33,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -43,7 +44,7 @@ type RosaMachinePoolScopeParams struct { Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane RosaMachinePool *expinfrav1.ROSAMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool ControllerName string } @@ -109,7 +110,7 @@ type RosaMachinePoolScope struct { 
Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane RosaMachinePool *expinfrav1.ROSAMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool session awsv2.Config serviceLimiters throttle.ServiceLimiters @@ -189,18 +190,13 @@ func (s *RosaMachinePoolScope) Namespace() string { // RosaMachinePoolReadyFalse marks the ready condition false using warning if error isn't // empty. func (s *RosaMachinePoolScope) RosaMachinePoolReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning - if err == "" { - severity = clusterv1.ConditionSeverityInfo - } - conditions.MarkFalse( - s.RosaMachinePool, - expinfrav1.RosaMachinePoolReadyCondition, - reason, - severity, - "%s", - err, - ) + conditions.Set(s.RosaMachinePool, metav1.Condition{ + Type: expinfrav1.RosaMachinePoolReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: fmt.Sprintf("%s", err), + }) + if err := s.PatchObject(); err != nil { return errors.Wrap(err, "failed to mark rosa machinepool not ready") } @@ -212,7 +208,7 @@ func (s *RosaMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.RosaMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []string{ expinfrav1.RosaMachinePoolReadyCondition, }}) } diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go index ae6ff23244..094291034d 100644 --- a/pkg/cloud/scope/session.go +++ b/pkg/cloud/scope/session.go @@ -40,7 +40,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" @@ -91,7 +90,12 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log) if err != nil { // could not get providers and retrieve the credentials - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalCredentialRetrievedCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.PrincipalCredentialRetrievalFailedReason, + Message: fmt.Sprintf("%s", err), + }) return nil, nil, errors.Wrap(err, "Failed to get providers for cluster") } @@ -129,8 +133,12 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se // Check if identity credentials can be retrieved. One reason this will fail is that source identity is not authorized for assume role. _, err := providers[0].Retrieve(context.Background()) if err != nil { - conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, "%s", err.Error()) - + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalCredentialRetrievedCondition, + Status: metav1.ConditionUnknown, + Reason: infrav1.CredentialProviderBuildFailedReason, + Message: fmt.Sprintf("%s", err), + }) // delete the existing session from cache. 
Otherwise, we give back a defective session on next method invocation with same cluster scope sessionCache.Delete(getSessionName(region, clusterScoper)) @@ -140,7 +148,10 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se optFns = append(optFns, config.WithCredentialsProvider(chainProvider)) } - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition) + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalCredentialRetrievedCondition, + Status: metav1.ConditionTrue, + }) ns, err := config.LoadDefaultConfig(context.Background(), optFns...) if err != nil { @@ -288,21 +299,37 @@ func buildProvidersForRef( default: return providers, errors.Errorf("No such provider known: '%s'", ref.Kind) } - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition) + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalUsageAllowedCondition, + Status: metav1.ConditionTrue, + }) return providers, nil } func setPrincipalUsageAllowedCondition(clusterScoper cloud.SessionMetadata) { - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition) + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalUsageAllowedCondition, + Status: metav1.ConditionTrue, + }) } func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identityObjectKey client.ObjectKey, clusterScoper cloud.SessionMetadata) { errMsg := fmt.Sprintf(notPermittedError, kind, identityObjectKey.Name) if clusterScoper.IdentityRef().Name == identityObjectKey.Name { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.PrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg) + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalUsageAllowedCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.PrincipalUsageUnauthorizedReason, + Message: fmt.Sprintf("%s", errMsg), + }) } else { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.SourcePrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg) + conditions.Set(clusterScoper.InfraCluster(), metav1.Condition{ + Type: infrav1.PrincipalUsageAllowedCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.SourcePrincipalUsageUnauthorizedReason, + Message: fmt.Sprintf("%s", errMsg), + }) } } diff --git a/pkg/cloud/scope/session_test.go b/pkg/cloud/scope/session_test.go index 1035ca6562..c49bf01937 100644 --- a/pkg/cloud/scope/session_test.go +++ b/pkg/cloud/scope/session_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestIsClusterPermittedToUsePrincipal(t *testing.T) { diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go index cde09c9dff..420a1a449a 100644 --- a/pkg/cloud/scope/shared.go +++ b/pkg/cloud/scope/shared.go @@ -27,7 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" ) @@ -136,15 +136,15 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP // getUnstructuredControlPlane returns the unstructured object for the control plane, if any. // When the reference is not set, it returns an empty object. func getUnstructuredControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) { - if cluster.Spec.ControlPlaneRef == nil { + if !cluster.Spec.ControlPlaneRef.IsDefined() { // If the control plane ref is not set, return an empty object. // Not having a control plane ref is valid given API contracts. return &unstructured.Unstructured{}, nil } - u, err := external.Get(ctx, client, cluster.Spec.ControlPlaneRef) + u, err := external.GetObjectFromContractVersionedRef(ctx, client, cluster.Spec.ControlPlaneRef, cluster.Namespace) if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name) + return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Spec.ControlPlaneRef, cluster.Spec.ControlPlaneRef.Name) } return u, nil } diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go index 53f1222072..98472e526a 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go @@ -42,8 +42,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling/mock_autoscalingiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceGetASGByName(t *testing.T) { @@ -1267,7 +1266,6 @@ func getFakeClient() client.Client { scheme := runtime.NewScheme() _ = infrav1.AddToScheme(scheme) _ = expinfrav1.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) return fake.NewClientBuilder().WithScheme(scheme).Build() } @@ -1350,7 +1348,7 @@ func getMachinePoolScope(client client.Client, clusterScope *scope.ClusterScope) mps, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ Client: client, Cluster: clusterScope.Cluster, - MachinePool: &expclusterv1.MachinePool{}, + MachinePool: &clusterv1.MachinePool{}, InfraCluster: clusterScope, AWSMachinePool: awsMachinePool, }) diff --git a/pkg/cloud/services/autoscaling/lifecyclehook.go b/pkg/cloud/services/autoscaling/lifecyclehook.go index 293070fab1..02d2de4f73 100644 --- a/pkg/cloud/services/autoscaling/lifecyclehook.go +++ b/pkg/cloud/services/autoscaling/lifecyclehook.go @@ -30,8 +30,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // DescribeLifecycleHooks returns the lifecycle hooks for the given AutoScalingGroup after retrieving them from the AWS API. 
diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go index 8d31916530..fb4dbed202 100644 --- a/pkg/cloud/services/ec2/bastion.go +++ b/pkg/cloud/services/ec2/bastion.go @@ -26,13 +26,14 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -73,7 +74,11 @@ func (s *Service) ReconcileBastion() error { instance, err := s.describeBastionInstance() if awserrors.IsNotFound(err) { //nolint:nestif if !conditions.Has(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, infrav1.BastionCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.BastionHostReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.BastionCreationStartedReason, + }) if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } @@ -98,7 +103,10 @@ func (s *Service) ReconcileBastion() error { // TODO(vincepri): check for possible changes between the default spec and the instance. s.scope.SetBastionInstance(instance.DeepCopy()) - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.BastionHostReadyCondition, + Status: metav1.ConditionTrue, + }) s.scope.Debug("Reconcile bastion completed successfully") return nil @@ -114,21 +122,33 @@ func (s *Service) DeleteBastion() error { } return errors.Wrap(err, "unable to describe bastion instance") } - - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.BastionHostReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.TerminateInstanceAndWait(instance.ID); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.BastionHostReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) record.Warnf(s.scope.InfraCluster(), "FailedTerminateBastion", "Failed to terminate bastion instance %q: %v", instance.ID, err) return errors.Wrap(err, "unable to delete bastion instance") } s.scope.SetBastionInstance(nil) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.BastionHostReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) record.Eventf(s.scope.InfraCluster(), 
"SuccessfulTerminateBastion", "Terminated bastion instance %q", instance.ID) s.scope.Info("Deleted bastion host", "id", instance.ID) diff --git a/pkg/cloud/services/ec2/bastion_test.go b/pkg/cloud/services/ec2/bastion_test.go index e48a540935..3d250ea7ee 100644 --- a/pkg/cloud/services/ec2/bastion_test.go +++ b/pkg/cloud/services/ec2/bastion_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceDeleteBastion(t *testing.T) { diff --git a/pkg/cloud/services/ec2/helper_test.go b/pkg/cloud/services/ec2/helper_test.go index bd40c2b7bb..550e9d7eb4 100644 --- a/pkg/cloud/services/ec2/helper_test.go +++ b/pkg/cloud/services/ec2/helper_test.go @@ -31,8 +31,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func setupClusterScope(cl client.Client) (*scope.ClusterScope, error) { @@ -164,8 +163,8 @@ func newAWSManagedControlPlane() *ekscontrolplanev1.AWSManagedControlPlane { } } -func newMachinePool() *v1beta1.MachinePool { - return &v1beta1.MachinePool{ +func newMachinePool() *clusterv1.MachinePool { + return &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", APIVersion: "v1", @@ -173,7 +172,7 @@ func newMachinePool() *v1beta1.MachinePool { ObjectMeta: metav1.ObjectMeta{ Name: "mp", }, - Spec: v1beta1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Version: ptr.To[string]("v1.23.3"), @@ -206,7 +205,7 @@ func setupScheme() (*runtime.Scheme, error) { if err := ekscontrolplanev1.AddToScheme(scheme); err != nil { return nil, err } - if err := v1beta1.AddToScheme(scheme); err != nil { + if err := clusterv1.AddToScheme(scheme); err != nil { return nil, err } return scheme, nil diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 6e5813c74a..d492eec152 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -28,7 +28,9 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/util/conditions" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -39,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // GetRunningInstanceByTags returns the existing instance or nothing if it doesn't exist. 
@@ -144,10 +146,14 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope, if scope.AWSMachine.Spec.AMI.ID != nil { //nolint:nestif input.ImageID = *scope.AWSMachine.Spec.AMI.ID } else { - if scope.Machine.Spec.Version == nil { + if scope.Machine.Spec.Version == "" { err := errors.New("Either AWSMachine's spec.ami.id or Machine's spec.version must be defined") - scope.SetFailureReason("CreateError") - scope.SetFailureMessage(err) + conditions.Set(scope.AWSMachine, metav1.Condition{ + Type: clusterv1.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: "CreateError", + Message: fmt.Sprintf("%s", err), + }) return nil, err } @@ -167,12 +173,12 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope, } if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" { - input.ImageID, err = s.eksAMILookup(ctx, *scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType) + input.ImageID, err = s.eksAMILookup(ctx, scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType) if err != nil { return nil, err } } else { - input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, *scope.Machine.Spec.Version) + input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, scope.Machine.Spec.Version) if err != nil { return nil, err } @@ -355,11 +361,12 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { var filtered []types.Subnet var errMessage string for _, subnet := range subnets { - if failureDomain != nil && *subnet.AvailabilityZone != *failureDomain { + + if failureDomain != "" && *subnet.AvailabilityZone != failureDomain { // we could have included the failure domain in the query criteria, but then we end up with EC2 error // messages that don't give a good hint about what is really wrong errMessage += fmt.Sprintf(" subnet %q availability zone %q does not match failure domain %q.", - *subnet.SubnetId, *subnet.AvailabilityZone, *failureDomain) + *subnet.SubnetId, *subnet.AvailabilityZone, failureDomain) continue } @@ -395,22 +402,22 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { return "", awserrors.NewFailedDependency(errMessage) } return *filtered[0].SubnetId, nil - case failureDomain != nil: + case failureDomain != "": if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP { - subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available in availability zone %q", - scope.Name(), *failureDomain) + scope.Name(), failureDomain) record.Warnf(scope.AWSMachine, "FailedCreate", errMessage) return "", awserrors.NewFailedDependency(errMessage) } return subnets[0].GetResourceID(), nil } - subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q, no subnets available in availability zone %q", - scope.Name(), *failureDomain) + scope.Name(), failureDomain) record.Warnf(scope.AWSMachine, "FailedCreate", errMessage) return "", 
awserrors.NewFailedDependency(errMessage) } diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index b6c7c69d23..a2012f39b6 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestInstanceIfExists(t *testing.T) { diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go index c805a00e7a..3cfa710669 100644 --- a/pkg/cloud/services/ec2/launchtemplate.go +++ b/pkg/cloud/services/ec2/launchtemplate.go @@ -20,6 +20,7 @@ import ( "context" "encoding/base64" "encoding/json" + "fmt" "sort" "strconv" "strings" @@ -33,6 +34,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" @@ -45,7 +47,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -81,13 +82,23 @@ func (s *Service) ReconcileLaunchTemplate( scope.Info("checking for existing launch template") launchTemplate, launchTemplateUserDataHash, launchTemplateUserDataSecretKey, _, err := ec2svc.GetLaunchTemplate(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateNotFoundReason, + Message: fmt.Sprintf("%s", err), + }) return err } imageID, err := ec2svc.DiscoverLaunchTemplateAMI(ctx, scope) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateCreateFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } @@ -116,16 +127,25 @@ func (s *Service) ReconcileLaunchTemplate( // Previously, user data was always written into the launch template, so we check // `AWSMachinePool.Spec.Ignition != nil` to toggle the S3 feature on for `AWSMachinePool` objects. 
objectURL, err := objectStoreSvc.CreateForMachinePool(ctx, scope, bootstrapData) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateReconcileFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } semver, err := semver.ParseTolerant(ignitionVersion) if err != nil { err = errors.Wrapf(err, "failed to parse ignition version %q", ignitionVersion) - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateReconcileFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } @@ -148,7 +168,12 @@ func (s *Service) ReconcileLaunchTemplate( userDataForLaunchTemplate, err = json.Marshal(ignData) if err != nil { err = errors.Wrap(err, "failed to convert ignition config to JSON") - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateReconcileFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } case 3: @@ -168,12 +193,22 @@ func (s *Service) ReconcileLaunchTemplate( userDataForLaunchTemplate, err = json.Marshal(ignData) if err != nil { err = errors.Wrap(err, "failed to convert ignition config to JSON") - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateReconcileFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } default: err = errors.Errorf("unsupported ignition version %q", ignitionVersion) - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateReconcileFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } } else { @@ -188,7 +223,12 @@ func (s *Service) ReconcileLaunchTemplate( scope.Info("no existing launch template found, creating") launchTemplateID, err := ec2svc.CreateLaunchTemplate(scope, imageID, *bootstrapDataSecretKey, userDataForLaunchTemplate, userdata.ComputeHash(bootstrapData)) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason:
expinfrav1.LaunchTemplateCreateFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } @@ -201,7 +241,12 @@ func (s *Service) ReconcileLaunchTemplate( if scope.GetLaunchTemplateIDStatus() == "" { launchTemplateID, err := ec2svc.GetLaunchTemplateID(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateNotFoundReason, + Message: fmt.Sprintf("%s", err), + }) return err } scope.SetLaunchTemplateIDStatus(launchTemplateID) @@ -211,7 +256,12 @@ func (s *Service) ReconcileLaunchTemplate( if scope.GetLaunchTemplateLatestVersionStatus() == "" { launchTemplateVersion, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.LaunchTemplateReadyCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.LaunchTemplateNotFoundReason, + Message: fmt.Sprintf("%s", err), + }) return err } scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion) @@ -249,7 +299,11 @@ func (s *Service) ReconcileLaunchTemplate( return err } if !canUpdate { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.PreLaunchTemplateUpdateCheckCondition, expinfrav1.PreLaunchTemplateUpdateCheckFailedReason, clusterv1.ConditionSeverityWarning, "") + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.PreLaunchTemplateUpdateCheckCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.PreLaunchTemplateUpdateCheckFailedReason, + }) return errors.New("Cannot update the launch template, prerequisite not met") } } @@ -305,10 +359,18 @@ func (s *Service) ReconcileLaunchTemplate( if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged { if err := runPostLaunchTemplateUpdateOperation(); err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.PostLaunchTemplateUpdateOperationCondition, + Status: metav1.ConditionFalse, + Reason: expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition) + conditions.Set(scope.GetSetter(), metav1.Condition{ + Type: expinfrav1.PostLaunchTemplateUpdateOperationCondition, + Status: metav1.ConditionTrue, + }) } return nil @@ -1042,7 +1104,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau } templateVersion := scope.GetMachinePool().Spec.Template.Spec.Version - if templateVersion == nil { + if templateVersion == "" { err := errors.New("Either AWSMachinePool's spec.awslaunchtemplate.ami.id or MachinePool's spec.template.spec.version must be defined") s.scope.Error(err, "") return nil, err @@ -1083,7 +1145,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == 
"" { lookupAMI, err = s.eksAMILookup( ctx, - *templateVersion, + templateVersion, imageArchitecture, scope.GetLaunchTemplate().AMI.EKSOptimizedLookupType, ) @@ -1096,7 +1158,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau imageLookupOrg, imageLookupBaseOS, imageArchitecture, - *templateVersion, + templateVersion, ) if err != nil { return nil, err diff --git a/pkg/cloud/services/ec2/launchtemplate_test.go b/pkg/cloud/services/ec2/launchtemplate_test.go index fd4ff8c81a..fff8074586 100644 --- a/pkg/cloud/services/ec2/launchtemplate_test.go +++ b/pkg/cloud/services/ec2/launchtemplate_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go index a16e6d1d34..13d85c41f9 100644 --- a/pkg/cloud/services/eks/cluster.go +++ b/pkg/cloud/services/eks/cluster.go @@ -27,10 +27,12 @@ import ( ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/blang/semver" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" "k8s.io/klog/v2" "k8s.io/utils/ptr" + controlplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" @@ -40,7 +42,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cmp" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/tristate" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -195,16 +197,29 @@ func (s *Service) setStatus(cluster *ekstypes.Cluster) error { s.scope.ControlPlane.Status.Ready = false // TODO FailureReason failureMsg := fmt.Sprintf("EKS cluster in unexpected %s state", cluster.Status) - s.scope.ControlPlane.Status.FailureMessage = &failureMsg + conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: controlplanev1.EKSControlPlaneReadyCondition, + Status: metav1.StatusFailure, + Reason: clusterv1.ClusterControlPlaneNotAvailableReason, + Message: failureMsg, + }) case ekstypes.ClusterStatusActive: s.scope.ControlPlane.Status.Ready = true - s.scope.ControlPlane.Status.FailureMessage = nil + if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) { record.Eventf(s.scope.ControlPlane, "SuccessfulCreateEKSControlPlane", "Created new EKS control plane %s", s.scope.KubernetesClusterName()) - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition, "created", clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneCreatingCondition, + Status: metav1.ConditionFalse, + Reason: "created", + }) } if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition, "updated", clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.ControlPlane, 
metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneUpdatingCondition, + Status: metav1.ConditionFalse, + Reason: "updated", + }) record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s", s.scope.KubernetesClusterName()) } // TODO FailureReason @@ -297,8 +312,8 @@ func makeEksEncryptionConfigs(encryptionConfig *ekscontrolplanev1.EncryptionConf }) } -func makeKubernetesNetworkConfig(serviceCidrs *clusterv1.NetworkRanges) (*ekstypes.KubernetesNetworkConfigRequest, error) { - if serviceCidrs == nil || len(serviceCidrs.CIDRBlocks) == 0 { +func makeKubernetesNetworkConfig(serviceCidrs clusterv1.NetworkRanges) (*ekstypes.KubernetesNetworkConfigRequest, error) { + if len(serviceCidrs.CIDRBlocks) == 0 { return nil, nil } @@ -478,7 +493,10 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek if out, err = s.EKSClient.CreateCluster(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) + conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneCreatingCondition, + Status: metav1.ConditionTrue, + }) record.Eventf(s.scope.ControlPlane, "InitiatedCreateEKSControlPlane", "Initiated creation of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }, awserrors.ResourceNotFound); err != nil { // TODO: change the error that can be retried @@ -531,7 +549,10 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes. if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneUpdatingCondition, + Status: metav1.ConditionTrue, + }) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -559,7 +580,10 @@ func (s *Service) reconcileLogging(ctx context.Context, logging *ekstypes.Loggin if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneUpdatingCondition, + Status: metav1.ConditionTrue, + }) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated logging update for EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -707,7 +731,10 @@ func (s *Service) reconcileClusterVersion(ctx context.Context, cluster *ekstypes return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneUpdatingCondition, + Status: metav1.ConditionTrue, + }) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of EKS control plane %s to version %s", s.scope.KubernetesClusterName(), nextVersionString) return true, nil @@ -758,7 +785,10 @@ func (s *Service) updateEncryptionConfig(ctx context.Context, updatedEncryptionC return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) +
conditions.Set(s.scope.ControlPlane, metav1.Condition{ + Type: ekscontrolplanev1.EKSControlPlaneUpdatingCondition, + Status: metav1.ConditionTrue, + }) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEncryptionConfig", "Initiated update of encryption config in EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go index 7e397f329e..9069eada26 100644 --- a/pkg/cloud/services/eks/cluster_test.go +++ b/pkg/cloud/services/eks/cluster_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestMakeEKSEncryptionConfigs(t *testing.T) { diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go index 153f293682..ba11e4bcd7 100644 --- a/pkg/cloud/services/eks/config.go +++ b/pkg/cloud/services/eks/config.go @@ -35,7 +35,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" diff --git a/pkg/cloud/services/eks/config_test.go b/pkg/cloud/services/eks/config_test.go index d6f64bd071..f1d0d2d299 100644 --- a/pkg/cloud/services/eks/config_test.go +++ b/pkg/cloud/services/eks/config_test.go @@ -20,7 +20,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" ) diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index 05b760fa44..79a83198a9 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -27,8 +27,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ReconcileControlPlane reconciles a EKS control plane. 
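Reviewer note: eks.go here, like lifecyclehook.go above and fargate.go below, takes the interim route instead of a full migration: the severity-based helpers keep their old signatures but are imported from the relocated util/deprecated/v1beta1/conditions package, paired with the core v1beta1 types, so these call sites compile unchanged while the rest of the tree moves to metav1.Condition. A minimal sketch of that interim style, assuming the deprecated package preserves the original MarkFalse signature; markFailed is an illustrative helper, not part of this patch:

package eks

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// markFailed keeps the v1beta1 severity-based style for files that have
// not yet moved to metav1.Condition.
func markFailed(obj conditions.Setter, t clusterv1.ConditionType, reason string, err error) {
	conditions.MarkFalse(obj, t, reason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
}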
diff --git a/pkg/cloud/services/eks/fargate.go b/pkg/cloud/services/eks/fargate.go index 06cd5ffac2..ca79ead4b0 100644 --- a/pkg/cloud/services/eks/fargate.go +++ b/pkg/cloud/services/eks/fargate.go @@ -32,8 +32,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func requeueProfileUpdating() reconcile.Result { diff --git a/pkg/cloud/services/eks/nodegroup.go b/pkg/cloud/services/eks/nodegroup.go index eb1430ffe6..708ccaf6b1 100644 --- a/pkg/cloud/services/eks/nodegroup.go +++ b/pkg/cloud/services/eks/nodegroup.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" ) diff --git a/pkg/cloud/services/eks/oidc_test.go b/pkg/cloud/services/eks/oidc_test.go index 8a57b330e1..71c95732f6 100644 --- a/pkg/cloud/services/eks/oidc_test.go +++ b/pkg/cloud/services/eks/oidc_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/testcert" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestOIDCReconcile(t *testing.T) { diff --git a/pkg/cloud/services/eks/roles.go b/pkg/cloud/services/eks/roles.go index 6d813ef416..734805b0ea 100644 --- a/pkg/cloud/services/eks/roles.go +++ b/pkg/cloud/services/eks/roles.go @@ -31,7 +31,7 @@ import ( eksiam "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 874ea2d815..8917104909 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -34,6 +34,7 @@ import ( rgapitypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types" "github.com/google/go-cmp/cmp" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/storage/names" @@ -46,7 +47,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -682,7 +683,11 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + 
conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } @@ -690,7 +695,11 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { apiELB, err := s.describeClassicELB(ctx, elbName) if IsNotFound(err) { s.scope.Debug("Control plane load balancer not found, skipping deletion") - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) return nil } if err != nil { @@ -699,13 +708,22 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { if apiELB.IsUnmanaged(s.scope.Name()) { s.scope.Debug("Found unmanaged classic load balancer for apiserver, skipping deletion", "api-server-elb-name", apiELB.Name) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) return nil } s.scope.Debug("deleting load balancer", "name", elbName) if err := s.deleteClassicELB(ctx, elbName); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } @@ -717,7 +735,11 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) s.scope.Info("Deleted control plane load balancer", "name", elbName) return nil } @@ -792,7 +814,11 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad if err != nil { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } @@ -811,7 +837,12 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad } s.scope.Debug("deleting load balancer", "name", name) if err := s.deleteLB(ctx, lb.ARN); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: 
metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } @@ -823,7 +854,11 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.LoadBalancerReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) s.scope.Info("Deleted control plane load balancer", "name", name) return nil diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go index d59c15c91b..ec17e99d5e 100644 --- a/pkg/cloud/services/elb/loadbalancer_test.go +++ b/pkg/cloud/services/elb/loadbalancer_test.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -2969,7 +2969,7 @@ func TestDeleteAPIServerELB(t *testing.T) { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + if loadBalancerConditionReason != clusterv1.DeletedV1Beta1Reason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, @@ -3018,7 +3018,7 @@ func TestDeleteAPIServerELB(t *testing.T) { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + if loadBalancerConditionReason != clusterv1.DeletedV1Beta1Reason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, @@ -3080,7 +3080,7 @@ func TestDeleteAPIServerELB(t *testing.T) { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + if loadBalancerConditionReason != clusterv1.DeletedV1Beta1Reason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, diff --git a/pkg/cloud/services/gc/cleanup_test.go b/pkg/cloud/services/gc/cleanup_test.go index 363ed94364..0be1138c27 100644 --- a/pkg/cloud/services/gc/cleanup_test.go +++ b/pkg/cloud/services/gc/cleanup_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileDelete(t *testing.T) { diff --git a/pkg/cloud/services/iamauth/reconcile.go b/pkg/cloud/services/iamauth/reconcile.go index c3a6407940..0f47a98634 100644 --- a/pkg/cloud/services/iamauth/reconcile.go +++ b/pkg/cloud/services/iamauth/reconcile.go @@ -30,8 +30,7 @@ import 
( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // ReconcileIAMAuthenticator is used to create the aws-iam-authenticator in a cluster. @@ -152,7 +151,7 @@ func (s *Service) getRolesForMachineDeployments(ctx context.Context, allRoles ma } func (s *Service) getRolesForMachinePools(ctx context.Context, allRoles map[string]struct{}) error { - machinePoolList := &expclusterv1.MachinePoolList{} + machinePoolList := &clusterv1.MachinePoolList{} selectors := []client.ListOption{ client.InNamespace(s.scope.Namespace()), client.MatchingLabels{ diff --git a/pkg/cloud/services/iamauth/reconcile_test.go b/pkg/cloud/services/iamauth/reconcile_test.go index 91b1d4b9a0..ae8ae86556 100644 --- a/pkg/cloud/services/iamauth/reconcile_test.go +++ b/pkg/cloud/services/iamauth/reconcile_test.go @@ -31,8 +31,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" ) @@ -146,8 +145,8 @@ func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterN return awsMP } -func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *expclusterv1.MachinePool { - mp := &expclusterv1.MachinePool{ +func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachinePool { + mp := &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -155,7 +154,7 @@ func createMachinepoolForCluster(name, namespace, clusterName string, infrastruc clusterv1.ClusterNameLabel: clusterName, }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: clusterName, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ diff --git a/pkg/cloud/services/iamauth/suite_test.go b/pkg/cloud/services/iamauth/suite_test.go index d94ce1bfaf..eaf1dda70f 100644 --- a/pkg/cloud/services/iamauth/suite_test.go +++ b/pkg/cloud/services/iamauth/suite_test.go @@ -29,8 +29,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( @@ -48,9 +47,7 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), diff --git 
a/pkg/cloud/services/instancestate/helpers_test.go b/pkg/cloud/services/instancestate/helpers_test.go index 5e004e08f5..d2d10b05ca 100644 --- a/pkg/cloud/services/instancestate/helpers_test.go +++ b/pkg/cloud/services/instancestate/helpers_test.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func setupCluster(clusterName string) (*scope.ClusterScope, error) { diff --git a/pkg/cloud/services/mock_services/gomock_reflect_1375320870/prog.go b/pkg/cloud/services/mock_services/gomock_reflect_1375320870/prog.go new file mode 100644 index 0000000000..589ee277d6 --- /dev/null +++ b/pkg/cloud/services/mock_services/gomock_reflect_1375320870/prog.go @@ -0,0 +1,66 @@ + +package main + +import ( + "encoding/gob" + "flag" + "fmt" + "os" + "path" + "reflect" + + "github.com/golang/mock/mockgen/model" + + pkg_ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" +) + +var output = flag.String("output", "", "The output file name, or empty to use stdout.") + +func main() { + flag.Parse() + + its := []struct{ + sym string + typ reflect.Type + }{ + + { "SecretInterface", reflect.TypeOf((*pkg_.SecretInterface)(nil)).Elem()}, + + } + pkg := &model.Package{ + // NOTE: This behaves contrary to documented behaviour if the + // package name is not the final component of the import path. + // The reflect package doesn't expose the package name, though. + Name: path.Base("sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"), + } + + for _, it := range its { + intf, err := model.InterfaceFromInterfaceType(it.typ) + if err != nil { + fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) + os.Exit(1) + } + intf.Name = it.sym + pkg.Interfaces = append(pkg.Interfaces, intf) + } + + outfile := os.Stdout + if len(*output) != 0 { + var err error + outfile, err = os.Create(*output) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file %q", *output) + } + defer func() { + if err := outfile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "failed to close output file %q", *output) + os.Exit(1) + } + }() + } + + if err := gob.NewEncoder(outfile).Encode(pkg); err != nil { + fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) + os.Exit(1) + } +} diff --git a/pkg/cloud/services/network/carriergateways.go b/pkg/cloud/services/network/carriergateways.go index af89c43bc4..6b91276d1a 100644 --- a/pkg/cloud/services/network/carriergateways.go +++ b/pkg/cloud/services/network/carriergateways.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -78,7 +79,10 @@ func (s *Service) reconcileCarrierGateway() error { record.Warnf(s.scope.InfraCluster(), "FailedTagCarrierGateway", "Failed to tag managed Carrier Gateway %q: %v", cagw.CarrierGatewayId, err) return errors.Wrapf(err, "failed to tag carrier gateway %q", *cagw.CarrierGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.CarrierGatewayReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } diff --git a/pkg/cloud/services/network/carriergateways_test.go 
b/pkg/cloud/services/network/carriergateways_test.go index c23a873c02..a1bc0d8ac6 100644 --- a/pkg/cloud/services/network/carriergateways_test.go +++ b/pkg/cloud/services/network/carriergateways_test.go @@ -33,7 +33,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileCarrierGateway(t *testing.T) { diff --git a/pkg/cloud/services/network/egress_only_gateways.go b/pkg/cloud/services/network/egress_only_gateways.go index e710adecf7..f8ce0d0085 100644 --- a/pkg/cloud/services/network/egress_only_gateways.go +++ b/pkg/cloud/services/network/egress_only_gateways.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -79,7 +80,10 @@ func (s *Service) reconcileEgressOnlyInternetGateways() error { record.Warnf(s.scope.InfraCluster(), "FailedTagEgressOnlyInternetGateway", "Failed to tag managed Egress Only Internet Gateway %q: %v", gateway.EgressOnlyInternetGatewayId, err) return errors.Wrapf(err, "failed to tag egress only internet gateway %q", *gateway.EgressOnlyInternetGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.EgressOnlyInternetGatewayReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } diff --git a/pkg/cloud/services/network/egress_only_gateways_test.go b/pkg/cloud/services/network/egress_only_gateways_test.go index fbd859ab80..190c5a44dd 100644 --- a/pkg/cloud/services/network/egress_only_gateways_test.go +++ b/pkg/cloud/services/network/egress_only_gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileEgressOnlyInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/eips_test.go b/pkg/cloud/services/network/eips_test.go index 53dbc23dd2..643f9bb177 100644 --- a/pkg/cloud/services/network/eips_test.go +++ b/pkg/cloud/services/network/eips_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceReleaseAddresses(t *testing.T) { diff --git a/pkg/cloud/services/network/gateways.go b/pkg/cloud/services/network/gateways.go index ee9fa65692..27ad6f5df0 100644 --- a/pkg/cloud/services/network/gateways.go +++ b/pkg/cloud/services/network/gateways.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -74,7 
+75,10 @@ func (s *Service) reconcileInternetGateways() error { record.Warnf(s.scope.InfraCluster(), "FailedTagInternetGateway", "Failed to tag managed Internet Gateway %q: %v", gateway.InternetGatewayId, err) return errors.Wrapf(err, "failed to tag internet gateway %q", *gateway.InternetGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.InternetGatewayReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } diff --git a/pkg/cloud/services/network/gateways_test.go b/pkg/cloud/services/network/gateways_test.go index 62d35e3b69..48c0b5b047 100644 --- a/pkg/cloud/services/network/gateways_test.go +++ b/pkg/cloud/services/network/gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 6bde5f5a64..108a91937f 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -26,6 +26,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -36,7 +37,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -54,21 +54,21 @@ func (s *Service) reconcileNatGateways() error { if len(s.scope.Subnets().FilterPrivate().FilterNonCni()) == 0 { s.scope.Debug("No private subnets available, skipping NAT gateways") - conditions.MarkFalse( - s.scope.InfraCluster(), - infrav1.NatGatewaysReadyCondition, - infrav1.NatGatewaysReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, - "No private subnets available, skipping NAT gateways") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.NatGatewaysReconciliationFailedReason, + Message: "No private subnets available, skipping NAT gateways", + }) return nil } else if len(s.scope.Subnets().FilterPublic().FilterNonCni()) == 0 { s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.") - conditions.MarkFalse( - s.scope.InfraCluster(), - infrav1.NatGatewaysReadyCondition, - infrav1.NatGatewaysReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, - "No public subnets available. 
Cannot create NAT gateways for private subnets, this might be a configuration error.") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.NatGatewaysReconciliationFailedReason, + Message: "No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.", + }) return nil } @@ -81,7 +81,11 @@ func (s *Service) reconcileNatGateways() error { if len(subnetIDs) > 0 { // set NatGatewayCreationStarted if the condition has never been set before if !conditions.Has(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.NatGatewaysCreationStartedReason, + }) if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } @@ -100,7 +104,10 @@ func (s *Service) reconcileNatGateways() error { if err != nil { return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionTrue, + }) } return nil diff --git a/pkg/cloud/services/network/natgateways_test.go b/pkg/cloud/services/network/natgateways_test.go index 79277625b5..97d7ff8900 100644 --- a/pkg/cloud/services/network/natgateways_test.go +++ b/pkg/cloud/services/network/natgateways_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index 35aa421be7..3ca13097e6 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -17,12 +17,14 @@ limitations under the License. package network import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" - infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -32,66 +34,138 @@ func (s *Service) ReconcileNetwork() (err error) { // VPC. 
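+ // NOTE: throughout this file the v1beta1 helpers (conditions.MarkTrue/MarkFalse) + // are replaced with conditions.Set and a metav1.Condition literal. Be aware that + // metav1.Condition conventionally expects a non-empty, machine-readable Reason + // even when Status is True; the Status=True branches below leave Reason unset + // (this note reflects the upstream metav1 API conventions, not a requirement + // verified against the new condition helpers).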
if err := s.reconcileVPC(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.VpcReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcReadyCondition, + Status: metav1.ConditionTrue, + }) // Secondary CIDRs if err := s.associateSecondaryCidrs(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SecondaryCidrsReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.SecondaryCidrReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SecondaryCidrsReadyCondition, + Status: metav1.ConditionTrue, + }) // Subnets. if err := s.reconcileSubnets(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, infrav1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SubnetsReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.SubnetsReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SubnetsReadyCondition, + Status: metav1.ConditionTrue, + }) // Internet Gateways. if err := s.reconcileInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, infrav1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.InternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.InternetGatewayFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.InternetGatewayReadyCondition, + Status: metav1.ConditionTrue, + }) // Carrier Gateway. 
if err := s.reconcileCarrierGateway(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.CarrierGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.CarrierGatewayFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.CarrierGatewayReadyCondition, + Status: metav1.ConditionTrue, + }) // Egress Only Internet Gateways. if err := s.reconcileEgressOnlyInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.EgressOnlyInternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.EgressOnlyInternetGatewayFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.EgressOnlyInternetGatewayReadyCondition, + Status: metav1.ConditionTrue, + }) // NAT Gateways. if err := s.reconcileNatGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.NatGatewaysReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionTrue, + }) // Routing tables. if err := s.reconcileRouteTables(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, infrav1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.RouteTablesReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.RouteTableReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.RouteTablesReadyCondition, + Status: metav1.ConditionTrue, + }) // VPC Endpoints. 
if err := s.reconcileVPCEndpoints(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcEndpointsReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.VpcEndpointsReconciliationFailedReason, + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcEndpointsReadyCondition, + Status: metav1.ConditionTrue, + }) s.scope.Debug("Reconcile network completed successfully") return nil @@ -120,40 +194,85 @@ func (s *Service) DeleteNetwork() (err error) { vpc.DeepCopyInto(s.scope.VPC()) // VPC Endpoints. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcEndpointsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteVPCEndpoints(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcEndpointsReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcEndpointsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) // Routing tables. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.RouteTablesReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteRouteTables(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.RouteTablesReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.RouteTablesReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) // NAT Gateways. 
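+ // The NAT gateways are deleted before the addresses are released below: an + // Elastic IP cannot be released while it is still associated with a NAT + // gateway, so the ordering of these two steps matters.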
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteNatGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.NatGatewaysReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) // EIPs. if err := s.releaseAddresses(); err != nil { @@ -161,68 +280,148 @@ func (s *Service) DeleteNetwork() (err error) { } // Internet Gateways. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.InternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.InternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.InternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) // Carrier Gateway. if s.scope.VPC().CarrierGatewayID != nil { if err := s.deleteCarrierGateway(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.CarrierGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.CarrierGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) } // Egress Only Internet Gateways. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.EgressOnlyInternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteEgressOnlyInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.EgressOnlyInternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.EgressOnlyInternetGatewayReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) // Subnets. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SubnetsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteSubnets(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SubnetsReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SubnetsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) // Secondary CIDR. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SecondaryCidrsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.disassociateSecondaryCidrs(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SecondaryCidrsReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DisassociateFailed", + Message: fmt.Sprintf("%s", err), + }) return err } // VPC. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteVPC(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) s.scope.Debug("Delete network completed successfully") return nil diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go index 21dd039ff1..3eb4398351 100644 --- a/pkg/cloud/services/network/routetables.go +++ b/pkg/cloud/services/network/routetables.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -122,7 +123,10 @@ func (s *Service) reconcileRouteTables() error { s.scope.Debug("Subnet has been associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", rt.ID) sn.RouteTableID = aws.String(rt.ID) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.RouteTablesReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } diff --git a/pkg/cloud/services/network/routetables_test.go b/pkg/cloud/services/network/routetables_test.go index eb131b8217..6b81c91585 100644 --- a/pkg/cloud/services/network/routetables_test.go +++ b/pkg/cloud/services/network/routetables_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileRouteTables(t *testing.T) { diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go index 3296072299..446e793575 100644 --- a/pkg/cloud/services/network/secondarycidr_test.go +++ b/pkg/cloud/services/network/secondarycidr_test.go @@ -35,13 +35,13 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupNewManagedControlPlaneScope(cl client.Client) (*scope.ManagedControlPlaneScope, error) { return 
scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ Client: cl, - Cluster: &v1beta1.Cluster{}, + Cluster: &v1beta2.Cluster{}, ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ SecondaryCidrBlock: ptr.To[string]("secondary-cidr"), diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go index f339a9a8c0..c9e2ee6261 100644 --- a/pkg/cloud/services/network/subnets.go +++ b/pkg/cloud/services/network/subnets.go @@ -29,6 +29,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" @@ -205,7 +206,10 @@ func (s *Service) reconcileSubnets() error { } s.scope.Debug("Reconciled subnets", "subnets", subnets) - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.SubnetsReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go index f14c9b7deb..48238d2f77 100644 --- a/pkg/cloud/services/network/subnets_test.go +++ b/pkg/cloud/services/network/subnets_test.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/network/vpc.go b/pkg/cloud/services/network/vpc.go index 078afd1dd7..e665905f2d 100644 --- a/pkg/cloud/services/network/vpc.go +++ b/pkg/cloud/services/network/vpc.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -36,7 +37,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -138,7 +138,11 @@ func (s *Service) reconcileVPC() error { s.scope.VPC().ID = vpc.ID if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.VpcReadyCondition, + Status: metav1.ConditionFalse, + Reason: infrav1.VpcCreationStartedReason, + }) if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } diff --git a/pkg/cloud/services/network/vpc_test.go b/pkg/cloud/services/network/vpc_test.go index 9c2f5f3a22..0c5f086962 100644 --- a/pkg/cloud/services/network/vpc_test.go +++ b/pkg/cloud/services/network/vpc_test.go @@ -34,7 +34,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + 
clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func describeVpcAttributeTrue(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...ec2.Options) (*ec2.DescribeVpcAttributeOutput, error) { diff --git a/pkg/cloud/services/s3/mock_s3iface/gomock_reflect_2183343406/prog.go b/pkg/cloud/services/s3/mock_s3iface/gomock_reflect_2183343406/prog.go new file mode 100644 index 0000000000..7df9cd3486 --- /dev/null +++ b/pkg/cloud/services/s3/mock_s3iface/gomock_reflect_2183343406/prog.go @@ -0,0 +1,66 @@ + +package main + +import ( + "encoding/gob" + "flag" + "fmt" + "os" + "path" + "reflect" + + "github.com/golang/mock/mockgen/model" + + pkg_ "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" +) + +var output = flag.String("output", "", "The output file name, or empty to use stdout.") + +func main() { + flag.Parse() + + its := []struct{ + sym string + typ reflect.Type + }{ + + { "S3API", reflect.TypeOf((*pkg_.S3API)(nil)).Elem()}, + + } + pkg := &model.Package{ + // NOTE: This behaves contrary to documented behaviour if the + // package name is not the final component of the import path. + // The reflect package doesn't expose the package name, though. + Name: path.Base("sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3"), + } + + for _, it := range its { + intf, err := model.InterfaceFromInterfaceType(it.typ) + if err != nil { + fmt.Fprintf(os.Stderr, "Reflection: %v\n", err) + os.Exit(1) + } + intf.Name = it.sym + pkg.Interfaces = append(pkg.Interfaces, intf) + } + + outfile := os.Stdout + if len(*output) != 0 { + var err error + outfile, err = os.Create(*output) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file %q", *output) + } + defer func() { + if err := outfile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "failed to close output file %q", *output) + os.Exit(1) + } + }() + } + + if err := gob.NewEncoder(outfile).Encode(pkg); err != nil { + fmt.Fprintf(os.Stderr, "gob encode: %v\n", err) + os.Exit(1) + } +} diff --git a/pkg/cloud/services/s3/s3_test.go b/pkg/cloud/services/s3/s3_test.go index 378d3114d3..22e4475877 100644 --- a/pkg/cloud/services/s3/s3_test.go +++ b/pkg/cloud/services/s3/s3_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_s3iface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index 2f9b83dc40..c7d898a034 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -35,7 +35,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceCreate(t *testing.T) { diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go index 9de501f7c5..768dd38a0b 100644 --- a/pkg/cloud/services/securitygroup/securitygroups.go +++ b/pkg/cloud/services/securitygroup/securitygroups.go @@ -25,6 +25,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" 
"github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -38,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -197,7 +198,10 @@ func (s *Service) ReconcileSecurityGroups() error { s.scope.Debug("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID) } } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionTrue, + }) return nil } @@ -308,7 +312,11 @@ func (s *Service) ec2SecurityGroupToSecurityGroup(ec2SecurityGroup types.Securit func (s *Service) DeleteSecurityGroups() error { if s.scope.VPC().ID == "" { s.scope.Debug("Skipping security group deletion, vpc-id is nil", "vpc-id", s.scope.VPC().ID) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, + }) return nil } @@ -321,8 +329,11 @@ func (s *Service) DeleteSecurityGroups() error { if len(clusterGroups) == 0 { return nil } - - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletingReason, + }) if err := s.scope.PatchObject(); err != nil { return err } @@ -331,7 +342,12 @@ func (s *Service) DeleteSecurityGroups() error { sg := clusterGroups[i] current := sg.IngressRules if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } @@ -343,10 +359,19 @@ func (s *Service) DeleteSecurityGroups() error { } if err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: "DeletingFailed", + Message: fmt.Sprintf("%s", err), + }) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.Set(s.scope.InfraCluster(), metav1.Condition{ + Type: infrav1.ClusterSecurityGroupsReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.DeletedV1Beta1Reason, 
+ }) return nil } diff --git a/pkg/cloud/services/securitygroup/securitygroups_test.go b/pkg/cloud/services/securitygroup/securitygroups_test.go index 2fd1cc64db..ee20404063 100644 --- a/pkg/cloud/services/securitygroup/securitygroups_test.go +++ b/pkg/cloud/services/securitygroup/securitygroups_test.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index d140cc9ecb..abb06b6d49 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -38,7 +38,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) type mockAPIError struct { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index c056042fba..eba75fe50d 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -9,20 +9,19 @@ import ( "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GetMachinePools belong to a cluster. -func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]expclusterv1.MachinePool, error) { - machinePoolList := expclusterv1.MachinePoolList{} +func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]clusterv1.MachinePool, error) { + machinePoolList := clusterv1.MachinePoolList{} listOptions := []crclient.ListOption{ crclient.InNamespace(clusterNS), crclient.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), } if err := client.List(ctx, &machinePoolList, listOptions...); err != nil { - return []expclusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) + return []clusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) } return machinePoolList.Items, nil diff --git a/test/e2e/shared/common.go b/test/e2e/shared/common.go index 53bce01ae4..56e65a7763 100644 --- a/test/e2e/shared/common.go +++ b/test/e2e/shared/common.go @@ -37,7 +37,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" diff --git a/test/e2e/suites/managed/control_plane_helpers.go b/test/e2e/suites/managed/control_plane_helpers.go index 0178236d32..46c6259ecc 100644 --- a/test/e2e/suites/managed/control_plane_helpers.go +++ b/test/e2e/suites/managed/control_plane_helpers.go @@ -33,7 +33,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" clusterctl "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment.go b/test/e2e/suites/managed/machine_deployment.go index 4ef19a0f8d..35d19643b2 100644 --- a/test/e2e/suites/managed/machine_deployment.go +++ b/test/e2e/suites/managed/machine_deployment.go @@ -30,7 +30,7 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment_helpers.go b/test/e2e/suites/managed/machine_deployment_helpers.go index e156b4ac51..ee6cc925ba 100644 --- a/test/e2e/suites/managed/machine_deployment_helpers.go +++ b/test/e2e/suites/managed/machine_deployment_helpers.go @@ -28,7 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" ) diff --git a/test/e2e/suites/managed/machine_pool_helpers.go b/test/e2e/suites/managed/machine_pool_helpers.go index b34eb7b1b8..1592f141e5 100644 --- a/test/e2e/suites/managed/machine_pool_helpers.go +++ b/test/e2e/suites/managed/machine_pool_helpers.go @@ -28,12 +28,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" ) type deleteMachinePoolInput struct { - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool Deleter framework.Deleter } @@ -43,14 +43,14 @@ func deleteMachinePool(ctx context.Context, input deleteMachinePoolInput) { } type waitForMachinePoolDeletedInput struct { - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool Getter framework.Getter } func waitForMachinePoolDeleted(ctx context.Context, input waitForMachinePoolDeletedInput, intervals ...interface{}) { By(fmt.Sprintf("Waiting for machine pool %s to be deleted", input.MachinePool.GetName())) Eventually(func() bool { - mp := 
&expclusterv1.MachinePool{} + mp := &clusterv1.MachinePool{} key := client.ObjectKey{ Namespace: input.MachinePool.GetNamespace(), Name: input.MachinePool.GetName(), diff --git a/test/e2e/suites/managed/managed_suite_test.go b/test/e2e/suites/managed/managed_suite_test.go index 15fc0d0b81..afc77aeb3e 100644 --- a/test/e2e/suites/managed/managed_suite_test.go +++ b/test/e2e/suites/managed/managed_suite_test.go @@ -32,8 +32,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( @@ -90,7 +89,6 @@ func initScheme() *runtime.Scheme { _ = expinfrav1.AddToScheme(sc) _ = clusterv1.AddToScheme(sc) _ = ekscontrolplanev1.AddToScheme(sc) - _ = expclusterv1.AddToScheme(sc) return sc } diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index d8c626f079..594899aef6 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -51,12 +51,12 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // GetClusterByName returns a Cluster object given his name. 
@@ -414,7 +414,7 @@ func LatestCIReleaseForVersion(searchVersion string) (string, error) { type conditionAssertion struct { conditionType clusterv1.ConditionType - status corev1.ConditionStatus + status metav1.ConditionStatus severity clusterv1.ConditionSeverity reason string } diff --git a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go index 1ef8cf8950..8bff9f3904 100644 --- a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go @@ -40,7 +40,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go index 5fa396d3db..d1a773aa55 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go @@ -93,7 +93,7 @@ var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass]", func() { return false, nil } - if !hasAWSClusterConditions(awsCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionTrue, "", ""}}) { + if !hasAWSClusterConditions(awsCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, metav1.ConditionTrue, "", ""}}) { ginkgo.By("AWSCluster missing bastion host ready condition") return false, nil } diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index f4d6d42e94..a204e23c4d 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -41,11 +41,11 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/instancestate" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const TestSvc = "test-svc-" @@ -215,7 +215,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { return false, nil } - if !hasAWSClusterConditions(awsCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionTrue, "", ""}}) { + if !hasAWSClusterConditions(awsCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, metav1.ConditionTrue, "", ""}}) { ginkgo.By("AWSCluster missing bastion host ready condition") return false, nil } diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 43f0618b0c..a4363e9379 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -51,7 +51,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" utilyaml 
"sigs.k8s.io/cluster-api/util/yaml" ) diff --git a/test/mocks/capa_clusterscoper_mock.go b/test/mocks/capa_clusterscoper_mock.go index e3664a61e0..d12d4031a6 100644 --- a/test/mocks/capa_clusterscoper_mock.go +++ b/test/mocks/capa_clusterscoper_mock.go @@ -33,7 +33,7 @@ import ( cloud "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" throttle "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" logger "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta20 "sigs.k8s.io/cluster-api/api/core/v1beta2" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -378,7 +378,7 @@ func (mr *MockClusterScoperMockRecorder) Session() *gomock.Call { } // SetFailureDomain mocks base method. -func (m *MockClusterScoper) SetFailureDomain(arg0 string, arg1 v1beta1.FailureDomainSpec) { +func (m *MockClusterScoper) SetFailureDomain(arg0 string, arg1 []v1beta20.FailureDomain) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetFailureDomain", arg0, arg1) } diff --git a/util/clusterapiv1beta1/util.go b/util/clusterapiv1beta1/util.go new file mode 100644 index 0000000000..684ef3d07b --- /dev/null +++ b/util/clusterapiv1beta1/util.go @@ -0,0 +1,753 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package util implements utilities. +package util + +import ( + "context" + "encoding/json" + "fmt" + "math" + "math/rand" + "reflect" + "strings" + "time" + + "github.com/blang/semver/v4" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/labels/format" +) + +const ( + // CharSet defines the alphanumeric set for random string generation. + CharSet = "0123456789abcdefghijklmnopqrstuvwxyz" +) + +var ( + rnd = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec + + // ErrNoCluster is returned when the cluster + // label could not be found on the object passed in. + ErrNoCluster = fmt.Errorf("no %q label present", clusterv1.ClusterNameLabel) + + // ErrUnstructuredFieldNotFound determines that a field + // in an unstructured object could not be found. + ErrUnstructuredFieldNotFound = fmt.Errorf("field not found") +) + +// RandomString returns a random alphanumeric string. 
+func RandomString(n int) string { + result := make([]byte, n) + for i := range result { + result[i] = CharSet[rnd.Intn(len(CharSet))] + } + return string(result) +} + +// Ordinalize takes an int and returns the ordinalized version of it. +// Eg. 1 --> 1st, 103 --> 103rd. +func Ordinalize(n int) string { + m := map[int]string{ + 0: "th", + 1: "st", + 2: "nd", + 3: "rd", + 4: "th", + 5: "th", + 6: "th", + 7: "th", + 8: "th", + 9: "th", + } + + an := int(math.Abs(float64(n))) + if an < 10 { + return fmt.Sprintf("%d%s", n, m[an]) + } + return fmt.Sprintf("%d%s", n, m[an%10]) +} + +// IsExternalManagedControlPlane returns a bool indicating whether the control plane referenced +// in the passed Unstructured resource is an externally managed control plane such as AKS, EKS, GKE, etc. +func IsExternalManagedControlPlane(controlPlane *unstructured.Unstructured) bool { + managed, found, err := unstructured.NestedBool(controlPlane.Object, "status", "externalManagedControlPlane") + if err != nil || !found { + return false + } + return managed +} + +// GetMachineIfExists gets a machine from the API server if it exists. +func GetMachineIfExists(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) { + if c == nil { + // Being called before k8s is setup as part of control plane VM creation + return nil, nil + } + + // Machines are identified by name + machine := &clusterv1.Machine{} + err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, machine) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + + return machine, nil +} + +// IsControlPlaneMachine checks whether a machine is a control plane node. +func IsControlPlaneMachine(machine *clusterv1.Machine) bool { + _, ok := machine.Labels[clusterv1.MachineControlPlaneLabel] + return ok +} + +// IsNodeReady returns true if a node is ready. +func IsNodeReady(node *corev1.Node) bool { + for _, condition := range node.Status.Conditions { + if condition.Type == corev1.NodeReady { + return condition.Status == corev1.ConditionTrue + } + } + + return false +} + +// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. +func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + if obj.Labels[clusterv1.ClusterNameLabel] == "" { + return nil, errors.WithStack(ErrNoCluster) + } + return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.ClusterNameLabel]) +} + +// GetOwnerCluster returns the Cluster object owning the current resource. +func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + for _, ref := range obj.GetOwnerReferences() { + if ref.Kind != "Cluster" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, errors.WithStack(err) + } + if gv.Group == clusterv1.GroupVersion.Group { + return GetClusterByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetClusterByName finds and returns a Cluster object using the specified params. 
+func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Cluster, error) {
+	cluster := &clusterv1.Cluster{}
+	key := client.ObjectKey{
+		Namespace: namespace,
+		Name:      name,
+	}
+
+	if err := c.Get(ctx, key, cluster); err != nil {
+		return nil, errors.Wrapf(err, "failed to get Cluster/%s", name)
+	}
+
+	return cluster, nil
+}
+
+// ObjectKey returns client.ObjectKey for the object.
+func ObjectKey(object metav1.Object) client.ObjectKey {
+	return client.ObjectKey{
+		Namespace: object.GetNamespace(),
+		Name:      object.GetName(),
+	}
+}
+
+// ClusterToInfrastructureMapFunc returns a handler.MapFunc that watches for
+// Cluster events and returns reconciliation requests for an infrastructure provider object.
+func ClusterToInfrastructureMapFunc(ctx context.Context, gvk schema.GroupVersionKind, c client.Client, providerCluster client.Object) handler.MapFunc {
+	log := ctrl.LoggerFrom(ctx)
+	return func(ctx context.Context, o client.Object) []reconcile.Request {
+		cluster, ok := o.(*clusterv1.Cluster)
+		if !ok {
+			return nil
+		}
+
+		// Return early if the InfrastructureRef is not defined.
+		if !cluster.Spec.InfrastructureRef.IsDefined() {
+			return nil
+		}
+		gk := gvk.GroupKind()
+		// Return early if the GroupKind doesn't match what we expect.
+		infraGK := cluster.Spec.InfrastructureRef.GroupKind()
+		if gk != infraGK {
+			return nil
+		}
+		providerCluster := providerCluster.DeepCopyObject().(client.Object)
+		key := types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Spec.InfrastructureRef.Name}
+
+		if err := c.Get(ctx, key, providerCluster); err != nil {
+			log.V(4).Info(fmt.Sprintf("Failed to get %T", providerCluster), "err", err)
+			return nil
+		}
+
+		if annotations.IsExternallyManaged(providerCluster) {
+			log.V(4).Info(fmt.Sprintf("%T is externally managed, skipping mapping", providerCluster))
+			return nil
+		}
+
+		return []reconcile.Request{
+			{
+				NamespacedName: client.ObjectKey{
+					Namespace: cluster.Namespace,
+					Name:      cluster.Spec.InfrastructureRef.Name,
+				},
+			},
+		}
+	}
+}
+
+// GetOwnerMachine returns the Machine object owning the current resource.
+func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Machine, error) {
+	for _, ref := range obj.GetOwnerReferences() {
+		gv, err := schema.ParseGroupVersion(ref.APIVersion)
+		if err != nil {
+			return nil, err
+		}
+		if ref.Kind == "Machine" && gv.Group == clusterv1.GroupVersion.Group {
+			return GetMachineByName(ctx, c, obj.Namespace, ref.Name)
+		}
+	}
+	return nil, nil
+}
+
+// GetMachineByName finds and returns a Machine object using the specified params.
+func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) {
+	m := &clusterv1.Machine{}
+	key := client.ObjectKey{Name: name, Namespace: namespace}
+	if err := c.Get(ctx, key, m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// MachineToInfrastructureMapFunc returns a handler.MapFunc that watches for
+// Machine events and returns reconciliation requests for an infrastructure provider object.
+func MachineToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc {
+	return func(_ context.Context, o client.Object) []reconcile.Request {
+		m, ok := o.(*clusterv1.Machine)
+		if !ok {
+			return nil
+		}
+
+		gk := gvk.GroupKind()
+		// Return early if the GroupKind doesn't match what we expect.
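+		// In v1beta2 the InfrastructureRef carries only group, kind and name (no apiVersion),
+		// so comparing GroupKinds is the strictest match available here.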
+		infraGK := m.Spec.InfrastructureRef.GroupKind()
+		if gk != infraGK {
+			return nil
+		}
+
+		return []reconcile.Request{
+			{
+				NamespacedName: client.ObjectKey{
+					Namespace: m.Namespace,
+					Name:      m.Spec.InfrastructureRef.Name,
+				},
+			},
+		}
+	}
+}
+
+// HasOwnerRef returns true if the OwnerReference is already in the slice. It matches based on Group, Kind and Name.
+func HasOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) bool {
+	return indexOwnerRef(ownerReferences, ref) > -1
+}
+
+// EnsureOwnerRef makes sure the slice contains the OwnerReference.
+// Note: EnsureOwnerRef will update the version of the OwnerReference if it exists with a different version. It will also update the UID.
+func EnsureOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) []metav1.OwnerReference {
+	idx := indexOwnerRef(ownerReferences, ref)
+	if idx == -1 {
+		return append(ownerReferences, ref)
+	}
+	ownerReferences[idx] = ref
+	return ownerReferences
+}
+
+// ReplaceOwnerRef re-parents an object from one OwnerReference to another.
+// It compares strictly based on UID to avoid reparenting across an intentional deletion: if an object is deleted
+// and re-created with the same name and namespace, the only way to tell there was an in-progress deletion
+// is by comparing the UIDs.
+func ReplaceOwnerRef(ownerReferences []metav1.OwnerReference, source metav1.Object, target metav1.OwnerReference) []metav1.OwnerReference {
+	fi := -1
+	for index, r := range ownerReferences {
+		if r.UID == source.GetUID() {
+			fi = index
+			ownerReferences[index] = target
+			break
+		}
+	}
+	if fi < 0 {
+		ownerReferences = append(ownerReferences, target)
+	}
+	return ownerReferences
+}
+
+// RemoveOwnerRef returns the slice of owner references after removing the supplied owner ref.
+// Note: RemoveOwnerRef ignores apiVersion and UID. It will remove the passed ownerReference where it matches Name, Group and Kind.
+func RemoveOwnerRef(ownerReferences []metav1.OwnerReference, inputRef metav1.OwnerReference) []metav1.OwnerReference {
+	if index := indexOwnerRef(ownerReferences, inputRef); index != -1 {
+		return append(ownerReferences[:index], ownerReferences[index+1:]...)
+	}
+	return ownerReferences
+}
+
+// HasExactOwnerRef returns true if the exact OwnerReference is already in the slice.
+// It matches based on APIVersion, Kind, Name, UID and Controller.
+func HasExactOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) bool {
+	for _, r := range ownerReferences {
+		if r.APIVersion == ref.APIVersion &&
+			r.Kind == ref.Kind &&
+			r.Name == ref.Name &&
+			r.UID == ref.UID &&
+			ptr.Deref(r.Controller, false) == ptr.Deref(ref.Controller, false) {
+			return true
+		}
+	}
+	return false
+}
+
+// indexOwnerRef returns the index of the owner reference in the slice if found, or -1.
+func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int {
+	for index, r := range ownerReferences {
+		if referSameObject(r, ref) {
+			return index
+		}
+	}
+	return -1
+}
+
+// IsOwnedByObject returns true if any of the owner references point to the given target.
+// It matches the object based on the Group, Kind and Name.
+func IsOwnedByObject(obj metav1.Object, target client.Object) bool {
+	for _, ref := range obj.GetOwnerReferences() {
+		if refersTo(&ref, target) {
+			return true
+		}
+	}
+	return false
+}
+
+// IsControlledBy differs from metav1.IsControlledBy. This function matches on Group, Kind and Name. The metav1.IsControlledBy function matches on UID only.
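+// Matching on Group, Kind and Name rather than UID keeps this check stable across restore or move operations, where objects are re-created and UIDs change.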
+func IsControlledBy(obj metav1.Object, owner client.Object) bool {
+	controllerRef := metav1.GetControllerOfNoCopy(obj)
+	if controllerRef == nil {
+		return false
+	}
+	return refersTo(controllerRef, owner)
+}
+
+// Returns true if a and b point to the same object based on Group, Kind and Name.
+func referSameObject(a, b metav1.OwnerReference) bool {
+	aGV, err := schema.ParseGroupVersion(a.APIVersion)
+	if err != nil {
+		return false
+	}
+
+	bGV, err := schema.ParseGroupVersion(b.APIVersion)
+	if err != nil {
+		return false
+	}
+
+	return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name
+}
+
+// Returns true if ref refers to obj based on Group, Kind and Name.
+func refersTo(ref *metav1.OwnerReference, obj client.Object) bool {
+	refGv, err := schema.ParseGroupVersion(ref.APIVersion)
+	if err != nil {
+		return false
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	return refGv.Group == gvk.Group && ref.Kind == gvk.Kind && ref.Name == obj.GetName()
+}
+
+// UnstructuredUnmarshalField is a wrapper around json and unstructured objects to decode and copy a specific field
+// value into an object.
+func UnstructuredUnmarshalField(obj *unstructured.Unstructured, v interface{}, fields ...string) error {
+	if obj == nil || obj.Object == nil {
+		return errors.Errorf("failed to unmarshal unstructured object: object is nil")
+	}
+
+	value, found, err := unstructured.NestedFieldNoCopy(obj.Object, fields...)
+	if err != nil {
+		return errors.Wrapf(err, "failed to retrieve field %q from %q", strings.Join(fields, "."), obj.GroupVersionKind())
+	}
+	if !found || value == nil {
+		return ErrUnstructuredFieldNotFound
+	}
+	valueBytes, err := json.Marshal(value)
+	if err != nil {
+		return errors.Wrapf(err, "failed to json-encode field %q value from %q", strings.Join(fields, "."), obj.GroupVersionKind())
+	}
+	if err := json.Unmarshal(valueBytes, v); err != nil {
+		return errors.Wrapf(err, "failed to json-decode field %q value from %q", strings.Join(fields, "."), obj.GroupVersionKind())
+	}
+	return nil
+}
+
+// HasOwner checks if any of the references in the passed list match the given group from apiVersion and one of the given kinds.
+func HasOwner(refList []metav1.OwnerReference, apiVersion string, kinds []string) bool {
+	gv, err := schema.ParseGroupVersion(apiVersion)
+	if err != nil {
+		return false
+	}
+
+	kindMap := make(map[string]bool)
+	for _, kind := range kinds {
+		kindMap[kind] = true
+	}
+
+	for _, mr := range refList {
+		mrGroupVersion, err := schema.ParseGroupVersion(mr.APIVersion)
+		if err != nil {
+			return false
+		}
+
+		if mrGroupVersion.Group == gv.Group && kindMap[mr.Kind] {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ClusterToTypedObjectsMapper returns a mapper function that gets a cluster and lists all objects for the object passed in
+// and returns a list of requests.
+// Note: This function uses the passed in typed ObjectList and thus with the default client configuration all list calls
+// will be cached.
+// NB: The objects are required to have `clusterv1.ClusterNameLabel` applied.
+func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note: we create the typed ObjectList once here, so we don't have to use
+	// reflection in every execution of the actual event handler.
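+	// scheme.New returns a zero-valued instance for the GVK; the list type must be registered in the scheme beforehand.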
+	obj, err := scheme.New(gvk)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk)
+	}
+	objectList, ok := obj.(client.ObjectList)
+	if !ok {
+		return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj)
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		cluster, ok := o.(*clusterv1.Cluster)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.ClusterNameLabel: cluster.Name,
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(cluster.Namespace))
+		}
+
+		// Note: We have to DeepCopy objectList into a new variable. Otherwise
+		// we have a race condition between DeepCopyObject and client.List if this
+		// mapper func is called concurrently.
+		objectList := objectList.DeepCopyObject().(client.ObjectList)
+		if err := c.List(ctx, objectList, listOpts...); err != nil {
+			return nil
+		}
+
+		objects, err := meta.ExtractList(objectList)
+		if err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range objects {
+			// Note: We don't check if the type cast succeeds as all items in a client.ObjectList
+			// are client.Objects.
+			o := obj.(client.Object)
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
+// MachineDeploymentToObjectsMapper returns a mapper function that gets a machinedeployment
+// and lists all objects for the object passed in and returns a list of requests.
+// NB: The objects are required to have `clusterv1.MachineDeploymentNameLabel` applied.
+func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note: we create the typed ObjectList once here, so we don't have to use
+	// reflection in every execution of the actual event handler.
+	obj, err := scheme.New(gvk)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk)
+	}
+	objectList, ok := obj.(client.ObjectList)
+	if !ok {
+		return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj)
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		md, ok := o.(*clusterv1.MachineDeployment)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.MachineDeploymentNameLabel: md.Name,
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(md.Namespace))
+		}
+
+		objectList = objectList.DeepCopyObject().(client.ObjectList)
+		if err := c.List(ctx, objectList, listOpts...); err != nil {
+			return nil
+		}
+
+		objects, err := meta.ExtractList(objectList)
+		if err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range objects {
+			// Note: We don't check if the type cast succeeds as all items in a client.ObjectList
+			// are client.Objects.
+			o := obj.(client.Object)
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
+// MachineSetToObjectsMapper returns a mapper function that gets a machineset
+// and lists all objects for the object passed in and returns a list of requests.
+// NB: The objects are required to have `clusterv1.MachineSetNameLabel` applied.
+func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note: we create the typed ObjectList once here, so we don't have to use
+	// reflection in every execution of the actual event handler.
+	obj, err := scheme.New(gvk)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk)
+	}
+	objectList, ok := obj.(client.ObjectList)
+	if !ok {
+		return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj)
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		ms, ok := o.(*clusterv1.MachineSet)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.MachineSetNameLabel: format.MustFormatValue(ms.Name),
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(ms.Namespace))
+		}
+
+		objectList = objectList.DeepCopyObject().(client.ObjectList)
+		if err := c.List(ctx, objectList, listOpts...); err != nil {
+			return nil
+		}
+
+		objects, err := meta.ExtractList(objectList)
+		if err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range objects {
+			// Note: We don't check if the type cast succeeds as all items in a client.ObjectList
+			// are client.Objects.
+			o := obj.(client.Object)
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
+// isAPINamespaced detects if a GroupVersionKind is namespaced.
+func isAPINamespaced(gk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) {
+	restMapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
+	if err != nil {
+		return false, fmt.Errorf("failed to get restmapping: %w", err)
+	}
+
+	switch restMapping.Scope.Name() {
+	case "":
+		return false, errors.New("Scope cannot be identified. Empty scope returned")
+	case meta.RESTScopeNameRoot:
+		return false, nil
+	default:
+		return true, nil
+	}
+}
+
+// ObjectReferenceToUnstructured converts an object reference to an unstructured object.
+func ObjectReferenceToUnstructured(in corev1.ObjectReference) *unstructured.Unstructured {
+	out := &unstructured.Unstructured{}
+	out.SetKind(in.Kind)
+	out.SetAPIVersion(in.APIVersion)
+	out.SetNamespace(in.Namespace)
+	out.SetName(in.Name)
+	return out
+}
+
+// IsSupportedVersionSkew will return true if a and b are no more than one minor version off from each other.
+func IsSupportedVersionSkew(a, b semver.Version) bool {
+	if a.Major != b.Major {
+		return false
+	}
+	if a.Minor > b.Minor {
+		return a.Minor-b.Minor == 1
+	}
+	return b.Minor-a.Minor <= 1
+}
+
+// LowestNonZeroResult compares two reconciliation results
+// and returns the one with lowest requeue time.
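+// For example, a Result{RequeueAfter: 5 * time.Second} wins over a zero Result, and the shorter of two RequeueAfter values wins.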
+func LowestNonZeroResult(i, j ctrl.Result) ctrl.Result {
+	switch {
+	case i.IsZero():
+		return j
+	case j.IsZero():
+		return i
+	case i.Requeue:
+		return i
+	case j.Requeue:
+		return j
+	case i.RequeueAfter < j.RequeueAfter:
+		return i
+	default:
+		return j
+	}
+}
+
+// LowestNonZeroInt32 returns the lowest non-zero value of the two provided values.
+func LowestNonZeroInt32(i, j int32) int32 {
+	if i == 0 {
+		return j
+	}
+	if j == 0 {
+		return i
+	}
+	if i < j {
+		return i
+	}
+	return j
+}
+
+// IsNil returns true if the passed interface is equal to nil or if it has an interface value of nil.
+func IsNil(i interface{}) bool {
+	if i == nil {
+		return true
+	}
+	switch reflect.TypeOf(i).Kind() {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Slice, reflect.Interface, reflect.UnsafePointer, reflect.Func:
+		return reflect.ValueOf(i).IsValid() && reflect.ValueOf(i).IsNil()
+	}
+	return false
+}
+
+// MergeMap merges maps.
+// NOTE: In case a key exists in multiple maps, the value of the first map is preserved.
+func MergeMap(maps ...map[string]string) map[string]string {
+	m := make(map[string]string)
+	for i := len(maps) - 1; i >= 0; i-- {
+		for k, v := range maps[i] {
+			m[k] = v
+		}
+	}
+
+	// Nil the result if the map is empty, thus avoiding triggering infinite reconcile
+	// given that at json level label: {} or annotation: {} is different from no field, which is the
+	// corresponding value stored in etcd given that those fields are defined as omitempty.
+	if len(m) == 0 {
+		return nil
+	}
+	return m
+}
diff --git a/util/conditions/helper.go b/util/conditions/helper.go
index 2acb09093e..9dfd97b919 100644
--- a/util/conditions/helper.go
+++ b/util/conditions/helper.go
@@ -18,8 +18,8 @@ limitations under the License.
 package conditions
 
 import (
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 // ErrorConditionAfterInit returns severity error, if the control plane is initialized; otherwise, returns severity warning.
diff --git a/util/paused/paused.go b/util/paused/paused.go
index 7750ded6d6..affaa5e556 100644
--- a/util/paused/paused.go
+++ b/util/paused/paused.go
@@ -26,13 +26,13 @@ import (
 	"fmt"
 	"strings"
 
-	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -46,10 +46,10 @@ type ConditionSetter interface {
 
 // EnsurePausedCondition sets the paused condition on the object and returns if it should be considered as paused.
 func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj ConditionSetter) (isPaused bool, conditionChanged bool, err error) {
-	oldCondition := conditions.Get(obj, clusterv1.PausedV1Beta2Condition)
-	newCondition := pausedCondition(c.Scheme(), cluster, obj, clusterv1.PausedV1Beta2Condition)
+	oldCondition := conditions.Get(obj, clusterv1.PausedCondition)
+	newCondition := pausedCondition(c.Scheme(), cluster, obj, clusterv1.PausedCondition)
 
-	isPaused = newCondition.Status == corev1.ConditionTrue
+	isPaused = newCondition.Status == metav1.ConditionTrue
 
 	log := ctrl.LoggerFrom(ctx)
 
@@ -72,10 +72,10 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluste
 		log.V(4).Info("Unpausing reconciliation for this object")
 	}
 
-	conditions.Set(obj, &newCondition)
+	conditions.Set(obj, newCondition)
 
-	if err := patchHelper.Patch(ctx, obj, patch.WithOwnedV1Beta2Conditions{Conditions: []string{
-		clusterv1.PausedV1Beta2Condition,
+	if err := patchHelper.Patch(ctx, obj, patch.WithOwnedConditions{Conditions: []string{
+		clusterv1.PausedCondition,
 	}}); err != nil {
 		return isPaused, false, err
 	}
@@ -84,12 +84,19 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluste
 }
 
 // pausedCondition sets the paused condition on the object and returns if it should be considered as paused.
-func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj ConditionSetter, targetConditionType string) clusterv1.Condition {
-	if (cluster != nil && cluster.Spec.Paused) || annotations.HasPaused(obj) {
-		var messages []string
-		if cluster != nil && cluster.Spec.Paused {
-			messages = append(messages, "Cluster spec.paused is set to true")
+func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj ConditionSetter, targetConditionType string) metav1.Condition {
+	clusterPaused := false
+	if cluster != nil && cluster.Spec.Paused != nil && *cluster.Spec.Paused {
+		clusterPaused = true
+	}
+	objectPaused := annotations.HasPaused(obj)
+
+	if clusterPaused || objectPaused {
+		var messages []string
+		if clusterPaused {
+			messages = append(messages, "Cluster spec.paused is set to true")
 		}
+
 		if annotations.HasPaused(obj) {
 			kind := "Object"
 			if gvk, err := apiutil.GVKForObject(obj, scheme); err == nil {
@@ -98,17 +105,17 @@ func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj Con
 			messages = append(messages, fmt.Sprintf("%s has the cluster.x-k8s.io/paused annotation", kind))
 		}
 
-		return clusterv1.Condition{
-			Type:    clusterv1.ConditionType(targetConditionType),
-			Status:  corev1.ConditionTrue,
-			Reason:  clusterv1.PausedV1Beta2Reason,
+		return metav1.Condition{
+			Type:    targetConditionType,
+			Status:  metav1.ConditionTrue,
+			Reason:  clusterv1.PausedReason,
 			Message: strings.Join(messages, ", "),
 		}
 	}
 
-	return clusterv1.Condition{
-		Type:   clusterv1.ConditionType(targetConditionType),
-		Status: corev1.ConditionFalse,
-		Reason: clusterv1.NotPausedV1Beta2Reason,
+	return metav1.Condition{
+		Type:   targetConditionType,
+		Status: metav1.ConditionFalse,
+		Reason: clusterv1.NotPausedReason,
 	}
 }
diff --git a/util/paused/paused_test.go b/util/paused/paused_test.go
index 6165263462..84691dbf92 100644
--- a/util/paused/paused_test.go
+++ b/util/paused/paused_test.go
@@ -24,10 +24,11 @@ import (
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -48,7 +49,7 @@ func TestEnsurePausedCondition(t *testing.T) { // Cluster Case 2: paused pausedCluster := normalCluster.DeepCopy() - pausedCluster.Spec.Paused = true + pausedCluster.Spec.Paused = ptr.To[bool](true) // Object case 1: unpaused obj := &builder.Phase1Obj{ObjectMeta: metav1.ObjectMeta{ From a7024a9fe8e4b09fe8f197e4919dd4f69e7d090f Mon Sep 17 00:00:00 2001 From: Toby Archer Date: Mon, 29 Sep 2025 01:31:20 +0200 Subject: [PATCH 2/2] Begin upgrade to v1beta2