diff --git a/Makefile b/Makefile index 7bfe7ab35..7dc595804 100644 --- a/Makefile +++ b/Makefile @@ -142,6 +142,9 @@ test/nightly-e2e/multi-region: bin/cockroach bin/kubectl bin/helm build/self-sig @PATH="$(PWD)/bin:${PATH}" go test -timeout 60m -v -test.run TestOperatorInMultiRegion ./tests/e2e/operator/multiRegion/... || (echo "Multi region tests failed with exit code $$?" && exit 1) +test/nightly-e2e/single-region: bin/cockroach bin/kubectl bin/helm build/self-signer + @PATH="$(PWD)/bin:${PATH}" go test -timeout 60m -v -test.run TestOperatorInSingleRegion ./tests/e2e/operator/singleRegion/... || (echo "Single region tests failed with exit code $$?" && exit 1) + test/lint: bin/helm ## lint the helm chart @build/lint.sh && \ bin/helm lint cockroachdb && \ diff --git a/build/templates/cockroachdb/values.yaml b/build/templates/cockroachdb/values.yaml index e46dc3fdf..a98ff9d46 100644 --- a/build/templates/cockroachdb/values.yaml +++ b/build/templates/cockroachdb/values.yaml @@ -874,7 +874,7 @@ operator: # Regions controls the number of CockroachDB nodes that are deployed per region. regions: - # Code corresponds to the cloud provider's identifier of this region (e.g. + # Code corresponds to the cloud infra's identifier of this region (e.g. # "us-east-1" for AWS, "us-east1" for GCP). This value is used to detect # which CrdbClusterRegion will be reconciled and must match the # "topology.kubernetes.io/region" label on Kubernetes Nodes in this @@ -882,7 +882,7 @@ operator: - code: us-east-1 # Nodes is the number of CRDB nodes that are in the region. nodes: 3 - # CloudProvider sets the cloud provider for this region. + # CloudProvider sets the cloud infra for this region. cloudProvider: k3d # Namespace is the name of the Kubernetes namespace that this # CrdbClusterRegion is deployed within.
It is used to compute the --join diff --git a/cmd/migrate/cockroachdb-enterprise-operator/root.go b/cmd/migrate/cockroachdb-enterprise-operator/root.go index 0c29c6b08..2b813f080 100644 --- a/cmd/migrate/cockroachdb-enterprise-operator/root.go +++ b/cmd/migrate/cockroachdb-enterprise-operator/root.go @@ -40,11 +40,11 @@ func Execute() { } func init() { - buildManifestCmd.PersistentFlags().StringVar(&cloudProvider, "cloud-provider", "", "name of cloud provider") - buildManifestCmd.PersistentFlags().StringVar(&cloudRegion, "cloud-region", "", "name of cloud provider region") + buildManifestCmd.PersistentFlags().StringVar(&cloudProvider, "cloud-infra", "", "name of cloud infra") + buildManifestCmd.PersistentFlags().StringVar(&cloudRegion, "cloud-region", "", "name of cloud infra region") buildManifestCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "path to kubeconfig file") buildManifestCmd.PersistentFlags().StringVar(&outputDir, "output-dir", "./manifests", "manifest output directory") - _ = buildManifestCmd.MarkPersistentFlagRequired("cloud-provider") + _ = buildManifestCmd.MarkPersistentFlagRequired("cloud-infra") _ = buildManifestCmd.MarkPersistentFlagRequired("cloud-region") rootCmd.AddCommand(buildManifestCmd) rootCmd.AddCommand(migrateCertsCmd) diff --git a/cockroachdb/values.yaml b/cockroachdb/values.yaml index fbca88711..6ebce6486 100644 --- a/cockroachdb/values.yaml +++ b/cockroachdb/values.yaml @@ -875,7 +875,7 @@ operator: # Regions controls the number of CockroachDB nodes that are deployed per region. regions: - # Code corresponds to the cloud provider's identifier of this region (e.g. + # Code corresponds to the cloud infra's identifier of this region (e.g. # "us-east-1" for AWS, "us-east1" for GCP). 
This value is used to detect # which CrdbClusterRegion will be reconciled and must match the # "topology.kubernetes.io/region" label on Kubernetes Nodes in this @@ -883,7 +883,7 @@ operator: - code: us-east-1 # Nodes is the number of CRDB nodes that are in the region. nodes: 3 - # CloudProvider sets the cloud provider for this region. + # CloudProvider sets the cloud infra for this region. cloudProvider: k3d # Namespace is the name of the Kubernetes namespace that this # CrdbClusterRegion is deployed within. It is used to compute the --join diff --git a/go.mod b/go.mod index 4b338779f..08e567bc7 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,17 @@ module github.com/cockroachdb/helm-charts go 1.23.8 require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Masterminds/semver/v3 v3.2.1 + github.com/aws/aws-sdk-go-v2 v1.36.3 + github.com/aws/aws-sdk-go-v2/config v1.29.14 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.224.0 + github.com/aws/aws-sdk-go-v2/service/eks v1.64.0 + github.com/aws/aws-sdk-go-v2/service/iam v1.36.0 github.com/cenkalti/backoff v2.2.1+incompatible github.com/cockroachdb/cockroach-operator v0.0.0-20250618040001-5a36c88b7231 github.com/cockroachdb/errors v1.8.0 @@ -16,7 +26,7 @@ require ( github.com/robfig/cron v1.2.0 github.com/sirupsen/logrus v1.9.0 github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.10.0 google.golang.org/api v0.126.0 google.golang.org/protobuf v1.36.3 gopkg.in/yaml.v3 v3.0.1 @@ -32,11 +42,30 @@ require ( cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect emperror.dev/errors v0.8.0 // 
indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.20 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.14 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect github.com/aws/aws-sdk-go v1.44.122 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect + github.com/aws/smithy-go v1.22.2 // indirect github.com/banzaicloud/k8s-objectmatcher v1.8.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boombuler/barcode v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect github.com/cockroachdb/redact v1.0.6 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect @@ -45,6 +74,7 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect + 
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect @@ -54,15 +84,17 @@ require ( github.com/go-openapi/swag v0.22.3 // indirect github.com/go-sql-driver/mysql v1.5.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.4 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosimple/slug v1.9.0 // indirect github.com/gruntwork-io/go-commons v0.8.0 // indirect @@ -83,6 +115,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect @@ -92,6 +125,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/otp v1.2.0 // indirect github.com/prometheus/client_golang v1.16.0 // 
indirect @@ -99,23 +133,25 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/urfave/cli v1.22.2 // indirect go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.38.0 // indirect golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/net v0.38.0 // indirect + golang.org/x/net v0.40.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.25.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.58.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 1ffa2661d..8d7ac9a58 100644 --- a/go.sum +++ b/go.sum @@ -759,7 +759,22 @@ emperror.dev/errors v0.8.0/go.mod h1:YcRvLPh626Ubn2xqtoprejnA5nFha+TJ+2vew48kWuE gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= @@ -803,6 +818,22 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.224.0/go.mod h1:ouvGEfHbLaIlWwpDpOVWPWR+YwO0HDv3vm5tYLq8ImY= +github.com/aws/aws-sdk-go-v2/service/eks v1.64.0/go.mod h1:v1xXy6ea0PHtWkjFUvAUh6B/5wv7UF909Nru0dOIJDk= +github.com/aws/aws-sdk-go-v2/service/iam v1.36.0/go.mod h1:HSvujsK8xeEHMIB18oMXjSfqaN9cVqpo/MtHJIksQRk= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod 
h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/banzaicloud/k8s-objectmatcher v1.8.0 h1:Nugn25elKtPMTA2br+JgHNeSQ04sc05MDPmpJnd1N2A= github.com/banzaicloud/k8s-objectmatcher v1.8.0/go.mod h1:p2LSNAjlECf07fbhDyebTkPUIYnU05G+WfGgkTmgeMg= @@ -829,6 +860,7 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -934,6 +966,8 @@ github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= 
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -1012,6 +1046,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -1115,6 +1150,7 @@ github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkj github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= @@ -1136,6 +1172,7 @@ 
github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38 github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -1293,6 +1330,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -1302,6 +1340,7 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.1.11/go.mod 
h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -1437,6 +1476,7 @@ github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1505,6 +1545,7 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -1571,6 +1612,7 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1684,6 +1726,7 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1704,6 +1747,7 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1857,6 +1901,7 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2031,6 +2076,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2053,6 +2099,7 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= 
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2076,6 +2123,7 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2297,6 +2345,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2531,6 +2580,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/upstream/cockroach-operator/api/v1alpha1/crdbcluster_types.go b/pkg/upstream/cockroach-operator/api/v1alpha1/crdbcluster_types.go index 0cf338940..353bcbafe 100644 --- a/pkg/upstream/cockroach-operator/api/v1alpha1/crdbcluster_types.go +++ b/pkg/upstream/cockroach-operator/api/v1alpha1/crdbcluster_types.go @@ -141,7 +141,7 @@ type CrdbClusterSpec struct { // is used to generate the --join flag passed to each CrdbNode within the // cluster. type CrdbClusterRegion struct { - // Code corresponds to the cloud provider's identifier of this region (e.g. + // Code corresponds to the cloud infra's identifier of this region (e.g. // "us-east-1" for AWS, "us-east1" for GCP). This value is used to detect // which CrdbClusterRegion will be reconciled and must match the // "topology.kubernetes.io/region" label on Kubernetes Nodes in this @@ -155,7 +155,7 @@ type CrdbClusterRegion struct { // +kubebuilder:validation:Minimum:=0 Nodes int32 `json:"nodes"` - // CloudProvider sets the cloud provider for this region. When set, this value + // CloudProvider sets the cloud infra for this region. When set, this value // is used to prefix the locality flag for all nodes in the region. 
// +kubebuilder:validation:Optional CloudProvider string `json:"cloudProvider,omitempty"` @@ -394,8 +394,8 @@ type CrdbClusterStatus struct { // the beta cluster controller. ReconciledByBetaController bool `json:"reconciledByBetaController,omitempty"` - // Provider is the name of the cloud provider that this object's k8s server is in. - Provider string `json:"provider,omitempty"` + // Provider is the name of the cloud infra that this object's k8s server is in. + Provider string `json:"infra,omitempty"` // Region is the name of the region that this crdbcluster object's k8s server is in. // This is useful for consumers to determine if this region's crdb pods diff --git a/pkg/upstream/cockroach-operator/api/v1alpha1/crdbnode_types.go b/pkg/upstream/cockroach-operator/api/v1alpha1/crdbnode_types.go index 6648899ba..48c0f677d 100644 --- a/pkg/upstream/cockroach-operator/api/v1alpha1/crdbnode_types.go +++ b/pkg/upstream/cockroach-operator/api/v1alpha1/crdbnode_types.go @@ -173,7 +173,7 @@ type CrdbNodeSpec struct { EncryptionAtRest *EncryptionAtRest `json:"encryptionAtRest,omitempty"` // NoCloudPrefixedLocalities indicates whether this node's locality flags - // are prefixed with its cloud provider's short code. See + // are prefixed with its cloud infra's short code. See // IntrusionCrdbCluster.NoCloudPrefixedLocalities for more info. It is part // of CrdbNodeSpec as opposed to CrdbClusterSpec since the init container // is spawned from CrdbNodeSpec. 
This field is expected to be removed once diff --git a/tests/e2e/migrate/helpers_test.go b/tests/e2e/migrate/helpers_test.go index 961967513..7ac34a49b 100644 --- a/tests/e2e/migrate/helpers_test.go +++ b/tests/e2e/migrate/helpers_test.go @@ -51,7 +51,7 @@ func prepareForMigration(t *testing.T, stsName, namespace, caSecret, crdbDeploym crdbDeploymentType, fmt.Sprintf("%s=%s", cmdArg, stsName), fmt.Sprintf("--namespace=%s", namespace), - "--cloud-provider=k3d", + "--cloud-infra=k3d", "--cloud-region=us-east-1", fmt.Sprintf("--output-dir=%s", manifestsDirPath), }, diff --git a/tests/e2e/operator/infra/aws.go b/tests/e2e/operator/infra/aws.go new file mode 100644 index 000000000..b9b78514e --- /dev/null +++ b/tests/e2e/operator/infra/aws.go @@ -0,0 +1,1044 @@ +package infra + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go-v2/service/iam" + iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cockroachdb/helm-charts/tests/e2e/coredns" + "github.com/cockroachdb/helm-charts/tests/e2e/operator" +) + +// ─── AWS GLOBAL CONSTANTS ─────────────────────────────────────────────────────── + +const ( + awsVpcName = "nishanth-cockroachdb-vpc" + subnetNamePrefix = "helm-charts" + ClusterRoleARN = "arn:aws:iam::541263489771:role/EKSClusterRole" + NodeRoleARN = "arn:aws:iam::541263489771:role/EKSNodegroupRole" +) + +// AwsClusterSetupConfig holds per‐cluster 
parameters +type AwsClusterSetupConfig struct { + Region string + SubnetRange []string // e.g. {"172.28.12.0/24", "172.28.24.0/24", "172.28.36.0/24"} + ElasticAllocIDs []string // will be populated with eipalloc- IDs + ClusterName string // e.g. "cockroachdb-east-1" + InstanceTypes []string // e.g. {"m5.large"} + DesiredSize int32 // e.g. 3 + MinSize int32 // e.g. 3 + MaxSize int32 // e.g. 4 + ServiceCIDR string // e.g. "10.200.0.0/16" + UseElasticIP bool // if true, reuse or allocate EIPs for CoreDNS NLB +} + +// Fill in your cluster list here; index order must match r.Clusters +var awsClusterSetups = []AwsClusterSetupConfig{ + { + Region: "us-east-1", + SubnetRange: getAwsSubnetRanges("us-east-1"), + ElasticAllocIDs: []string{}, + ClusterName: "cockroachdb-east-1", + InstanceTypes: []string{AWSDefaultInstanceType}, + DesiredSize: DefaultNodeCount, + MinSize: DefaultMinNodeCount, + MaxSize: DefaultMaxNodeCount, + ServiceCIDR: getAwsServiceCIDR("us-east-1"), + UseElasticIP: true, + }, + { + Region: "us-east-2", + SubnetRange: getAwsSubnetRanges("us-east-2"), + ElasticAllocIDs: []string{}, + ClusterName: "cockroachdb-east-2", + InstanceTypes: []string{AWSDefaultInstanceType}, + DesiredSize: DefaultNodeCount, + MinSize: DefaultMinNodeCount, + MaxSize: DefaultMaxNodeCount, + ServiceCIDR: getAwsServiceCIDR("us-east-2"), + UseElasticIP: true, + }, + { + Region: "us-west-1", + SubnetRange: getAwsSubnetRanges("us-west-1"), + ElasticAllocIDs: []string{}, + ClusterName: "cockroachdb-west-1", + InstanceTypes: []string{AWSDefaultInstanceType}, + DesiredSize: DefaultNodeCount, + MinSize: DefaultMinNodeCount, + MaxSize: DefaultMaxNodeCount, + ServiceCIDR: getAwsServiceCIDR("us-west-1"), + UseElasticIP: true, + }, +} + +// Helper functions to get network configuration from common.go +func getAwsSubnetRanges(region string) []string { + if config, ok := NetworkConfigs[ProviderAWS][region]; ok { + if ranges, ok := config.(map[string]interface{})["SubnetRanges"].([]string); ok { + 
return ranges + } + } + // Fallback defaults if config not found + return []string{"172.28.12.0/24", "172.28.24.0/24", "172.28.36.0/24"} +} + +func getAwsServiceCIDR(region string) string { + if config, ok := NetworkConfigs[ProviderAWS][region]; ok { + if cidr, ok := config.(map[string]interface{})["ServiceCIDR"].(string); ok { + return cidr + } + } + // Fallback default if config not found + return "10.200.0.0/16" +} + +// AwsRegion implements CloudProvider for AWS +type AwsRegion struct { + *operator.Region +} + +// ScaleNodePool scales the node pool in an EKS cluster +func (r *AwsRegion) ScaleNodePool(t *testing.T, location string, nodeCount, index int) { + t.Logf("[%s] Scaling node pool in cluster %s to %d nodes", ProviderAWS, awsClusterSetups[index].ClusterName, nodeCount) + + // In a real implementation, this would use the AWS SDK to scale the node pool + // This would include getting the node pool and updating its count + t.Logf("[%s] Node pool scaling not fully implemented for AWS", ProviderAWS) + + // Uncomment and complete this code when implementing actual scaling: + /* + ctx := context.Background() + cfg, err := awsconfig.LoadDefaultConfig(ctx) + if err != nil { + t.Logf("[%s] Failed to load AWS config: %v", ProviderAWS, err) + return + } + + setup := awsClusterSetups[index] + eksClient := eks.NewFromConfig(cfg, func(o *eks.Options) { o.Region = setup.Region }) + + // Actual implementation would go here + */ +} + +func (r *AwsRegion) SetUpInfra(t *testing.T) { + if r.ReusingInfra { + t.Logf("[%s] Reusing existing infrastructure", ProviderAWS) + return + } + + t.Logf("[%s] Setting up infrastructure", ProviderAWS) + ctx := context.Background() + cfg, err := awsconfig.LoadDefaultConfig(ctx) + require.NoError(t, err) + + iamClient := iam.NewFromConfig(cfg) + stsClient := sts.NewFromConfig(cfg) + + // 1) Ensure IAM Roles exist + t.Logf("[%s] Ensuring IAM roles exist", ProviderAWS) + _, err = ensureClusterServiceRole(ctx, iamClient, ClusterRoleARN) + 
require.NoError(t, err) + _, err = ensureNodeInstanceRole(ctx, iamClient, stsClient, NodeRoleARN) + require.NoError(t, err) + + var clients = make(map[string]client.Client) + r.CorednsClusterOptions = make(map[string]coredns.CoreDNSClusterOption) + clusterSubnets := make([][]string, len(r.Clusters)) + + for i, clusterName := range r.Clusters { + setup := awsClusterSetups[i] + setup.ClusterName = clusterName + t.Logf("[%s] Setting up cluster %s in region %s", ProviderAWS, clusterName, setup.Region) + + // EC2 & EKS clients scoped to region + ec2Client := ec2.NewFromConfig(cfg, func(o *ec2.Options) { o.Region = setup.Region }) + eksClient := eks.NewFromConfig(cfg, func(o *eks.Options) { o.Region = setup.Region }) + + // 2) Create or get VPC + vpcID, err := createAwsVPC(ctx, ec2Client, awsVpcName, DefaultVPCCIDR) + require.NoError(t, err) + + // 2a) Ensure IGW + Public Route Table + igwID, err := ensureInternetGateway(ctx, ec2Client, vpcID) + require.NoError(t, err) + rtID, err := ensurePublicRouteTable(ctx, ec2Client, vpcID, igwID) + require.NoError(t, err) + + // 3a) Create or reuse 3 public subnets + subnetIDs, err := createAwsSubnets(ctx, ec2Client, vpcID, setup.SubnetRange, setup.Region) + require.NoError(t, err) + // Associate each subnet with the public RT + for _, snID := range subnetIDs { + _, err := ec2Client.AssociateRouteTable(ctx, &ec2.AssociateRouteTableInput{ + RouteTableId: aws.String(rtID), + SubnetId: aws.String(snID), + }) + if err != nil && !IsResourceConflict(err) { + t.Fatalf("[%s] Failed to associate subnet %q with RT %q: %v", ProviderAWS, snID, rtID, err) + } + } + clusterSubnets[i] = subnetIDs + + // 3b) Create control‐plane & worker‐node SGs + firewall rules + controlSG, workerSG, err := createEKSClusterSecurityGroups(ctx, ec2Client, vpcID, clusterName) + require.NoError(t, err) + + // 3c) Create EKS control plane (public + private endpoint) + t.Logf("[%s] Creating EKS cluster %s", ProviderAWS, clusterName) + err = createEKSCluster(ctx, 
eksClient, setup, subnetIDs, controlSG) + require.NoError(t, err) + + // 3d) Update kubeconfig to point to this cluster + err = UpdateKubeconfigAWS(t, setup.Region, clusterName) + require.NoError(t, err) + + // 3e) Create managed NodeGroup + t.Logf("[%s] Creating EKS node group for cluster %s", ProviderAWS, clusterName) + err = createManagedNodeGroup(ctx, eksClient, setup, subnetIDs, workerSG) + require.NoError(t, err) + + // 3f) Prepare CoreDNS options + r.Namespace[clusterName] = fmt.Sprintf("%s-%s", operator.Namespace, strings.ToLower(random.UniqueId())) + clients[clusterName] = MustNewClientForContext(t, clusterName) + r.CorednsClusterOptions[operator.CustomDomains[i]] = coredns.CoreDNSClusterOption{ + IPs: setup.ElasticAllocIDs, + Namespace: r.Namespace[clusterName], + Domain: operator.CustomDomains[i], + } + } + + // 4) Deploy CoreDNS ConfigMap + Service (with NLB & EIPs), then rollout restart + for i, clusterName := range r.Clusters { + setup := awsClusterSetups[i] + subnetIDs := clusterSubnets[i] + kubeConfig, err := k8s.KubeConfigPathFromHomeDirE() + require.NoError(t, err) + + // 4a) Reuse or allocate Elastic IPs if needed + if setup.UseElasticIP { + t.Logf("[%s] Ensuring Elastic IPs for cluster %s", ProviderAWS, clusterName) + allocationIDs, err := ensureElasticIPs(ctx, ec2.NewFromConfig(cfg, func(o *ec2.Options) { + o.Region = setup.Region + }), len(subnetIDs)) + require.NoError(t, err) + setup.ElasticAllocIDs = allocationIDs + awsClusterSetups[i].ElasticAllocIDs = allocationIDs + r.CorednsClusterOptions[operator.CustomDomains[i]] = coredns.CoreDNSClusterOption{ + IPs: allocationIDs, + Namespace: r.Namespace[clusterName], + Domain: operator.CustomDomains[i], + } + } + + // 4b) Deploy CoreDNS + annotations := GetLoadBalancerAnnotations(ProviderAWS) + // Add AWS-specific annotations + annotations["service.beta.kubernetes.io/aws-load-balancer-eip-allocations"] = strings.Join(setup.ElasticAllocIDs, ",") + 
annotations["service.beta.kubernetes.io/aws-load-balancer-subnets"] = strings.Join(subnetIDs, ",") + + var staticIP *string // AWS uses EIP allocations instead of direct IPs + + err = DeployCoreDNS(t, clusterName, kubeConfig, staticIP, ProviderAWS, operator.CustomDomains[i], r.CorednsClusterOptions) + require.NoError(t, err, "failed to deploy CoreDNS to cluster %s", clusterName) + } + + r.Clients = clients + r.ReusingInfra = true + t.Logf("[%s] Infrastructure setup completed", ProviderAWS) +} + +// TeardownInfra deletes all AWS resources created by SetUpInfra +func (r *AwsRegion) TeardownInfra(t *testing.T) { + t.Logf("[%s] Starting infrastructure teardown", ProviderAWS) + ctx := context.Background() + cfg, err := awsconfig.LoadDefaultConfig(ctx) + require.NoError(t, err) + + for _, setup := range awsClusterSetups[:len(r.Clusters)] { + region := setup.Region + clusterName := setup.ClusterName + + ec2Client := ec2.NewFromConfig(cfg, func(o *ec2.Options) { o.Region = region }) + eksClient := eks.NewFromConfig(cfg, func(o *eks.Options) { o.Region = region }) + + // a) Delete NodeGroup + ngName := clusterName + "-ng" + t.Logf("[%s] Deleting node group '%s' from cluster '%s'", ProviderAWS, ngName, clusterName) + _, _ = eksClient.DeleteNodegroup(ctx, &eks.DeleteNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(ngName), + }) + // Wait until gone + for { + _, err := eksClient.DescribeNodegroup(ctx, &eks.DescribeNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(ngName), + }) + if err != nil { + break + } + time.Sleep(15 * time.Second) + } + + // b) Delete Cluster + t.Logf("[%s] Deleting EKS cluster '%s'", ProviderAWS, clusterName) + _, _ = eksClient.DeleteCluster(ctx, &eks.DeleteClusterInput{Name: aws.String(clusterName)}) + // Wait until gone + for { + _, err = eksClient.DescribeCluster(ctx, &eks.DescribeClusterInput{Name: aws.String(clusterName)}) + if err != nil { + break + } + time.Sleep(15 * 
time.Second) + } + + // c) Release Elastic IPs + t.Logf("[%s] Releasing Elastic IPs for cluster '%s'", ProviderAWS, clusterName) + out, _ := ec2Client.DescribeAddresses(ctx, &ec2.DescribeAddressesInput{ + Filters: []ec2types.Filter{ + { + Name: aws.String("tag:Name"), + // EC2 tag filters accept '*' and '?' wild-cards, so this matches every prefix variant. + Values: []string{"self-hosted-testing-eip-*"}, + }, + }, + }) + + // Release only the unassociated ones that matched the filter + for _, addr := range out.Addresses { + if addr.AssociationId == nil { // still free + _, _ = ec2Client.ReleaseAddress(ctx, &ec2.ReleaseAddressInput{ + AllocationId: addr.AllocationId, + }) + } + } + + // d) Delete Security Groups (by tag Name = -control-sg and -worker-sg) + t.Logf("[%s] Deleting security groups for cluster '%s'", ProviderAWS, clusterName) + sgFilters := []ec2types.Filter{ + {Name: aws.String("vpc-id"), Values: []string{awsVpcName}}, // using VPC ID tag works too + {Name: aws.String("tag:Name"), Values: []string{clusterName + "-control-sg", clusterName + "-worker-sg"}}, + } + sgOut, _ := ec2Client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{Filters: sgFilters}) + for _, sg := range sgOut.SecurityGroups { + _, _ = ec2Client.DeleteSecurityGroup(ctx, &ec2.DeleteSecurityGroupInput{GroupId: sg.GroupId}) + } + + // e) Delete subnets + t.Logf("[%s] Deleting subnets for cluster '%s'", ProviderAWS, clusterName) + subnetFilters := []ec2types.Filter{{Name: aws.String("tag:Name"), Values: []string{subnetNamePrefix + "-*"}}} + subnetsOut, _ := ec2Client.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{Filters: subnetFilters}) + for _, sn := range subnetsOut.Subnets { + _, _ = ec2Client.DeleteSubnet(ctx, &ec2.DeleteSubnetInput{SubnetId: sn.SubnetId}) + } + + // f) Delete Route Table + t.Logf("[%s] Cleaning up networking resources", ProviderAWS) + vpcsOut, _ := ec2Client.DescribeVpcs(ctx, &ec2.DescribeVpcsInput{Filters: []ec2types.Filter{{Name: aws.String("tag:Name"), 
Values: []string{awsVpcName}}}}) + if len(vpcsOut.Vpcs) > 0 { + vpcID := *vpcsOut.Vpcs[0].VpcId + rtFilters := []ec2types.Filter{{Name: aws.String("tag:Name"), Values: []string{vpcID + "-public-rt"}}} + rtOut, _ := ec2Client.DescribeRouteTables(ctx, &ec2.DescribeRouteTablesInput{Filters: rtFilters}) + for _, rt := range rtOut.RouteTables { + _, _ = ec2Client.DeleteRouteTable(ctx, &ec2.DeleteRouteTableInput{RouteTableId: rt.RouteTableId}) + } + + // g) Detach & Delete Internet Gateway + igwOut, _ := ec2Client.DescribeInternetGateways(ctx, &ec2.DescribeInternetGatewaysInput{ + Filters: []ec2types.Filter{{Name: aws.String("attachment.vpc-id"), Values: []string{vpcID}}}, + }) + for _, igw := range igwOut.InternetGateways { + _, _ = ec2Client.DetachInternetGateway(ctx, &ec2.DetachInternetGatewayInput{ + InternetGatewayId: igw.InternetGatewayId, + VpcId: aws.String(vpcID), + }) + _, _ = ec2Client.DeleteInternetGateway(ctx, &ec2.DeleteInternetGatewayInput{ + InternetGatewayId: igw.InternetGatewayId, + }) + } + + // h) Finally, delete the VPC + t.Logf("[%s] Deleting VPC '%s'", ProviderAWS, vpcID) + _, _ = ec2Client.DeleteVpc(ctx, &ec2.DeleteVpcInput{VpcId: aws.String(vpcID)}) + } + } + + t.Logf("[%s] Infrastructure teardown completed", ProviderAWS) +} + +// ─── ENSURE IAM ROLES EXIST ────────────────────────────────────────────────────── + +func ensureClusterServiceRole(ctx context.Context, iamClient *iam.Client, roleARN string) (string, error) { + roleName, err := extractRoleName(roleARN) + if err != nil { + return "", fmt.Errorf("invalid ClusterRoleARN %q: %w", roleARN, err) + } + out, err := iamClient.GetRole(ctx, &iam.GetRoleInput{RoleName: aws.String(roleName)}) + if err == nil { + return aws.ToString(out.Role.Arn), nil + } + var noEnt *iamtypes.NoSuchEntityException + if !errors.As(err, &noEnt) { + return "", fmt.Errorf("error fetching IAM role %q: %w", roleName, err) + } + + trust := map[string]interface{}{ + "Version": "2012-10-17", + "Statement": 
[]map[string]interface{}{ + {"Effect": "Allow", "Principal": map[string]interface{}{"Service": "eks.amazonaws.com"}, "Action": "sts:AssumeRole"}, + }, + } + trustBytes, _ := json.Marshal(trust) + created, err := iamClient.CreateRole(ctx, &iam.CreateRoleInput{ + RoleName: aws.String(roleName), + AssumeRolePolicyDocument: aws.String(string(trustBytes)), + Description: aws.String("EKS Cluster Service Role"), + Tags: []iamtypes.Tag{{Key: aws.String("Name"), Value: aws.String(roleName)}}, + }) + if err != nil { + return "", fmt.Errorf("failed to create role %q: %w", roleName, err) + } + for _, pol := range []string{ + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController", + } { + _, _ = iamClient.AttachRolePolicy(ctx, &iam.AttachRolePolicyInput{ + RoleName: aws.String(roleName), + PolicyArn: aws.String(pol), + }) + } + return aws.ToString(created.Role.Arn), nil +} + +func ensureNodeInstanceRole( + ctx context.Context, + iamClient *iam.Client, + stsClient *sts.Client, + roleARN string, +) (string, error) { + roleName, err := extractRoleName(roleARN) + if err != nil { + return "", fmt.Errorf("invalid NodeRoleARN %q: %w", roleARN, err) + } + out, err := iamClient.GetRole(ctx, &iam.GetRoleInput{RoleName: aws.String(roleName)}) + if err == nil { + return aws.ToString(out.Role.Arn), nil + } + var noEnt *iamtypes.NoSuchEntityException + if !errors.As(err, &noEnt) { + return "", fmt.Errorf("error fetching IAM role %q: %w", roleName, err) + } + + trust := map[string]interface{}{ + "Version": "2012-10-17", + "Statement": []map[string]interface{}{ + {"Effect": "Allow", "Principal": map[string]interface{}{"Service": "ec2.amazonaws.com"}, "Action": "sts:AssumeRole"}, + }, + } + trustBytes, _ := json.Marshal(trust) + created, err := iamClient.CreateRole(ctx, &iam.CreateRoleInput{ + RoleName: aws.String(roleName), + AssumeRolePolicyDocument: aws.String(string(trustBytes)), + Description: aws.String("EKS Worker Node Instance 
Role"), + Tags: []iamtypes.Tag{{Key: aws.String("Name"), Value: aws.String(roleName)}}, + }) + if err != nil { + return "", fmt.Errorf("failed to create NodeInstanceRole %q: %w", roleName, err) + } + nodeRoleARN := aws.ToString(created.Role.Arn) + + for _, pol := range []string{ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + } { + _, _ = iamClient.AttachRolePolicy(ctx, &iam.AttachRolePolicyInput{ + RoleName: aws.String(roleName), + PolicyArn: aws.String(pol), + }) + } + + if err = ensureCallerCanPassRole(ctx, iamClient, stsClient, nodeRoleARN); err != nil { + return "", err + } + return nodeRoleARN, nil +} + +func ensureCallerCanPassRole( + ctx context.Context, + iamClient *iam.Client, + stsClient *sts.Client, + roleARN string, +) error { + caller, err := stsClient.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{}) + if err != nil { + return fmt.Errorf("cannot get caller identity: %w", err) + } + callerARN := aws.ToString(caller.Arn) + + var callerName string + var isUser bool + if strings.Contains(callerARN, ":user/") { + parts := strings.Split(callerARN, "/") + callerName = parts[len(parts)-1] + isUser = true + } else if strings.Contains(callerARN, ":role/") { + parts := strings.Split(callerARN, "/") + callerName = parts[len(parts)-1] + isUser = false + } else { + return fmt.Errorf("caller ARN %q is neither user nor role", callerARN) + } + + sim, err := iamClient.SimulatePrincipalPolicy(ctx, &iam.SimulatePrincipalPolicyInput{ + PolicySourceArn: aws.String(callerARN), + ActionNames: []string{"iam:PassRole"}, + ResourceArns: []string{roleARN}, + }) + if err != nil { + return fmt.Errorf("simulate policy error: %w", err) + } + for _, r := range sim.EvaluationResults { + if r.EvalDecision != iamtypes.PolicyEvaluationDecisionTypeAllowed { + inline := map[string]interface{}{ + "Version": "2012-10-17", + "Statement": []map[string]interface{}{ + 
{"Effect": "Allow", "Action": "iam:PassRole", "Resource": roleARN}, + }, + } + inlineBytes, _ := json.Marshal(inline) + if isUser { + _, _ = iamClient.PutUserPolicy(ctx, &iam.PutUserPolicyInput{ + UserName: aws.String(callerName), + PolicyName: aws.String("AllowPassRoleToNodeRole"), + PolicyDocument: aws.String(string(inlineBytes)), + }) + } else { + _, _ = iamClient.PutRolePolicy(ctx, &iam.PutRolePolicyInput{ + RoleName: aws.String(callerName), + PolicyName: aws.String("AllowPassRoleToNodeRole"), + PolicyDocument: aws.String(string(inlineBytes)), + }) + } + break + } + } + return nil +} + +func extractRoleName(roleARN string) (string, error) { + parts := strings.Split(roleARN, "/") + if len(parts) < 2 { + return "", fmt.Errorf("cannot parse role ARN: %q", roleARN) + } + return parts[len(parts)-1], nil +} + +// ─── VPC, SUBNET, INTERNET GATEWAY, ROUTE TABLE, SECURITY GROUPS ────────────── + +func createAwsVPC(ctx context.Context, client *ec2.Client, name, cidr string) (string, error) { + vpcs, err := client.DescribeVpcs(ctx, &ec2.DescribeVpcsInput{ + Filters: []ec2types.Filter{{Name: aws.String("tag:Name"), Values: []string{name}}}, + }) + if err != nil { + return "", err + } + if len(vpcs.Vpcs) > 0 { + return aws.ToString(vpcs.Vpcs[0].VpcId), nil + } + out, err := client.CreateVpc(ctx, &ec2.CreateVpcInput{ + CidrBlock: aws.String(cidr), + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeVpc, + Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String(name)}}, + }}, + }) + if err != nil { + return "", err + } + vpcID := aws.ToString(out.Vpc.VpcId) + // Enable DNS support & hostnames + client.ModifyVpcAttribute(ctx, &ec2.ModifyVpcAttributeInput{ + VpcId: out.Vpc.VpcId, + EnableDnsSupport: &ec2types.AttributeBooleanValue{Value: aws.Bool(true)}, + }) + client.ModifyVpcAttribute(ctx, &ec2.ModifyVpcAttributeInput{ + VpcId: out.Vpc.VpcId, + EnableDnsHostnames: &ec2types.AttributeBooleanValue{Value: aws.Bool(true)}, + }) + 
return vpcID, nil +} + +func createAwsSubnets( + ctx context.Context, + client *ec2.Client, + vpcID string, + cidrBlocks []string, + region string, +) ([]string, error) { + if len(cidrBlocks) < 3 { + return nil, fmt.Errorf("need at least 3 CIDR blocks, got %d", len(cidrBlocks)) + } + // 1) List first 3 AZs + azResp, err := client.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("region-name"), Values: []string{region}}, + {Name: aws.String("state"), Values: []string{"available"}}, + }, + }) + if err != nil { + return nil, fmt.Errorf("list AZs in %s: %w", region, err) + } + if len(azResp.AvailabilityZones) < 3 { + return nil, fmt.Errorf("fewer than 3 AZs in %q", region) + } + azNames := []string{ + *azResp.AvailabilityZones[0].ZoneName, + *azResp.AvailabilityZones[1].ZoneName, + *azResp.AvailabilityZones[2].ZoneName, + } + + var subnetIDs []string + for i, az := range azNames { + desiredTag := fmt.Sprintf("%s-%s", subnetNamePrefix, az) + desc, err := client.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + {Name: aws.String("tag:Name"), Values: []string{desiredTag}}, + {Name: aws.String("availability-zone"), Values: []string{az}}, + }, + }) + if err != nil { + return nil, fmt.Errorf("describe subnets %q: %w", desiredTag, err) + } + var thisSubnetID string + if len(desc.Subnets) > 0 { + thisSubnetID = aws.ToString(desc.Subnets[0].SubnetId) + } else { + out, err := client.CreateSubnet(ctx, &ec2.CreateSubnetInput{ + VpcId: aws.String(vpcID), + CidrBlock: aws.String(cidrBlocks[i]), + AvailabilityZone: aws.String(az), + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeSubnet, + Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String(desiredTag)}}, + }}, + }) + if err != nil { + return nil, fmt.Errorf("create subnet %q: %w", desiredTag, err) + } + thisSubnetID = 
aws.ToString(out.Subnet.SubnetId) + } + // Enable Auto-assign Public IP + client.ModifySubnetAttribute(ctx, &ec2.ModifySubnetAttributeInput{ + SubnetId: aws.String(thisSubnetID), + MapPublicIpOnLaunch: &ec2types.AttributeBooleanValue{Value: aws.Bool(true)}, + }) + subnetIDs = append(subnetIDs, thisSubnetID) + } + return subnetIDs, nil +} + +func ensureInternetGateway(ctx context.Context, client *ec2.Client, vpcID string) (string, error) { + igws, err := client.DescribeInternetGateways(ctx, &ec2.DescribeInternetGatewaysInput{ + Filters: []ec2types.Filter{{Name: aws.String("attachment.vpc-id"), Values: []string{vpcID}}}, + }) + if err != nil { + return "", err + } + if len(igws.InternetGateways) > 0 { + return aws.ToString(igws.InternetGateways[0].InternetGatewayId), nil + } + createOut, err := client.CreateInternetGateway(ctx, &ec2.CreateInternetGatewayInput{ + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeInternetGateway, + Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String(vpcID + "-igw")}}, + }}, + }) + if err != nil { + return "", err + } + igwID := aws.ToString(createOut.InternetGateway.InternetGatewayId) + _, err = client.AttachInternetGateway(ctx, &ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String(igwID), + VpcId: aws.String(vpcID), + }) + return igwID, err +} + +func ensurePublicRouteTable(ctx context.Context, client *ec2.Client, vpcID, igwID string) (string, error) { + rtResp, err := client.DescribeRouteTables(ctx, &ec2.DescribeRouteTablesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + {Name: aws.String("tag:Name"), Values: []string{vpcID + "-public-rt"}}, + }, + }) + if err != nil { + return "", err + } + if len(rtResp.RouteTables) > 0 { + return aws.ToString(rtResp.RouteTables[0].RouteTableId), nil + } + createOut, err := client.CreateRouteTable(ctx, &ec2.CreateRouteTableInput{ + VpcId: aws.String(vpcID), + TagSpecifications: 
[]ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeRouteTable, + Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String(vpcID + "-public-rt")}}}, + }, + }) + if err != nil { + return "", err + } + rtID := aws.ToString(createOut.RouteTable.RouteTableId) + _, err = client.CreateRoute(ctx, &ec2.CreateRouteInput{ + RouteTableId: aws.String(rtID), + DestinationCidrBlock: aws.String("0.0.0.0/0"), + GatewayId: aws.String(igwID), + }) + return rtID, err +} + +func createEKSClusterSecurityGroups( + ctx context.Context, + client *ec2.Client, + vpcID, clusterName string, +) (controlSG string, workerSG string, _ error) { + controlName := clusterName + "-control-sg" + workerName := clusterName + "-worker-sg" + + controlID, err := findOrCreateSG(ctx, client, vpcID, controlName, "EKS control-plane SG") + if err != nil { + return "", "", err + } + workerID, err := findOrCreateSG(ctx, client, vpcID, workerName, "EKS worker-node SG") + if err != nil { + return "", "", err + } + + // Collect all subnet CIDRs + subnetsOut, err := client.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []ec2types.Filter{{Name: aws.String("vpc-id"), Values: []string{vpcID}}}, + }) + if err != nil { + return "", "", err + } + var allSubnetCIDRs []string + for _, sn := range subnetsOut.Subnets { + allSubnetCIDRs = append(allSubnetCIDRs, aws.ToString(sn.CidrBlock)) + } + + // controlSG: allow inbound TCP:443 from workerSG + _ = revokeIfExists(ctx, client, controlID, workerID, 443, 443, "tcp") + _, err = client.AuthorizeSecurityGroupIngress(ctx, &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String(controlID), + IpPermissions: []ec2types.IpPermission{{ + FromPort: aws.Int32(443), + ToPort: aws.Int32(443), + IpProtocol: aws.String("tcp"), + UserIdGroupPairs: []ec2types.UserIdGroupPair{{GroupId: aws.String(workerID)}}, + }}, + }) + if err != nil { + return "", "", err + } + + // workerSG: ingress TCP:9443 from all subnets + _ = revokeCIDRIngress(ctx, client, 
workerID, 9443, 9443, "tcp", allSubnetCIDRs) + _, err = client.AuthorizeSecurityGroupIngress(ctx, &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String(workerID), + IpPermissions: []ec2types.IpPermission{{ + FromPort: aws.Int32(9443), + ToPort: aws.Int32(9443), + IpProtocol: aws.String("tcp"), + IpRanges: toIPRanges(allSubnetCIDRs), + }}, + }) + if err != nil { + return "", "", err + } + + // workerSG: internal traffic (TCP 0–65535, UDP 0–65535, ICMP all) from all subnets + _ = revokeCIDRIngress(ctx, client, workerID, 0, 65535, "tcp", allSubnetCIDRs) + _ = revokeCIDRIngress(ctx, client, workerID, 0, 65535, "udp", allSubnetCIDRs) + _ = revokeCIDRIngress(ctx, client, workerID, -1, -1, "icmp", allSubnetCIDRs) + _, err = client.AuthorizeSecurityGroupIngress(ctx, &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String(workerID), + IpPermissions: []ec2types.IpPermission{ + {FromPort: aws.Int32(0), ToPort: aws.Int32(65535), IpProtocol: aws.String("tcp"), IpRanges: toIPRanges(allSubnetCIDRs)}, + {FromPort: aws.Int32(0), ToPort: aws.Int32(65535), IpProtocol: aws.String("udp"), IpRanges: toIPRanges(allSubnetCIDRs)}, + {FromPort: aws.Int32(-1), ToPort: aws.Int32(-1), IpProtocol: aws.String("icmp"), IpRanges: toIPRanges(allSubnetCIDRs)}, + }, + }) + if err != nil { + return "", "", err + } + + // workerSG: allow controlSG on TCP:443 & TCP:1025–65535 + _ = revokeIfExists(ctx, client, workerID, controlID, 443, 443, "tcp") + _ = revokeIfExists(ctx, client, workerID, controlID, 1025, 65535, "tcp") + _, err = client.AuthorizeSecurityGroupIngress(ctx, &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String(workerID), + IpPermissions: []ec2types.IpPermission{ + {FromPort: aws.Int32(443), ToPort: aws.Int32(443), IpProtocol: aws.String("tcp"), UserIdGroupPairs: []ec2types.UserIdGroupPair{{GroupId: aws.String(controlID)}}}, + {FromPort: aws.Int32(1025), ToPort: aws.Int32(65535), IpProtocol: aws.String("tcp"), UserIdGroupPairs: []ec2types.UserIdGroupPair{{GroupId: 
aws.String(controlID)}}}, + }, + }) + if err != nil { + return "", "", err + } + + // workerSG: egress all + _ = revokeAllEgress(ctx, client, workerID) + _, err = client.AuthorizeSecurityGroupEgress(ctx, &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: aws.String(workerID), + IpPermissions: []ec2types.IpPermission{{ + IpProtocol: aws.String("-1"), + IpRanges: []ec2types.IpRange{{CidrIp: aws.String("0.0.0.0/0")}}, + }}, + }) + if err != nil { + return "", "", err + } + + return controlID, workerID, nil +} + +func findOrCreateSG(ctx context.Context, client *ec2.Client, vpcID, name, desc string) (string, error) { + out, err := client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + {Name: aws.String("tag:Name"), Values: []string{name}}, + }, + }) + if err != nil { + return "", err + } + if len(out.SecurityGroups) > 0 { + return aws.ToString(out.SecurityGroups[0].GroupId), nil + } + created, err := client.CreateSecurityGroup(ctx, &ec2.CreateSecurityGroupInput{ + GroupName: aws.String(name), + Description: aws.String(desc), + VpcId: aws.String(vpcID), + TagSpecifications: []ec2types.TagSpecification{{ + ResourceType: ec2types.ResourceTypeSecurityGroup, + Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String(name)}}, + }}, + }) + if err != nil { + return "", err + } + return aws.ToString(created.GroupId), nil +} + +func revokeIfExists(ctx context.Context, client *ec2.Client, toSG, fromSG string, fromPort, toPort int32, ipProto string) error { + _, _ = client.RevokeSecurityGroupIngress(ctx, &ec2.RevokeSecurityGroupIngressInput{ + GroupId: aws.String(toSG), + IpPermissions: []ec2types.IpPermission{{ + FromPort: aws.Int32(fromPort), + ToPort: aws.Int32(toPort), + IpProtocol: aws.String(ipProto), + UserIdGroupPairs: []ec2types.UserIdGroupPair{ + {GroupId: aws.String(fromSG)}, + }, + }}, + }) + return nil +} + +func revokeCIDRIngress(ctx context.Context, client 
*ec2.Client, toSG string, fromPort, toPort int32, ipProto string, cidrs []string) error { + var ipRanges []ec2types.IpRange + for _, cidr := range cidrs { + ipRanges = append(ipRanges, ec2types.IpRange{CidrIp: aws.String(cidr)}) + } + _, _ = client.RevokeSecurityGroupIngress(ctx, &ec2.RevokeSecurityGroupIngressInput{ + GroupId: aws.String(toSG), + IpPermissions: []ec2types.IpPermission{{ + FromPort: aws.Int32(fromPort), + ToPort: aws.Int32(toPort), + IpProtocol: aws.String(ipProto), + IpRanges: ipRanges, + }}, + }) + return nil +} + +func revokeAllEgress(ctx context.Context, client *ec2.Client, sgID string) error { + _, _ = client.RevokeSecurityGroupEgress(ctx, &ec2.RevokeSecurityGroupEgressInput{ + GroupId: aws.String(sgID), + IpPermissions: []ec2types.IpPermission{{ + IpProtocol: aws.String("-1"), + IpRanges: []ec2types.IpRange{{CidrIp: aws.String("0.0.0.0/0")}}, + }}, + }) + return nil +} + +func toIPRanges(cidrs []string) []ec2types.IpRange { + var ranges []ec2types.IpRange + for _, c := range cidrs { + ranges = append(ranges, ec2types.IpRange{CidrIp: aws.String(c)}) + } + return ranges +} + +// ─── EKS CLUSTER + NODEGROUP CREATION ────────────────────────────────────────── + +func createEKSCluster( + ctx context.Context, + eksClient *eks.Client, + cfg AwsClusterSetupConfig, + subnetIDs []string, + controlSG string, +) error { + _, err := eksClient.DescribeCluster(ctx, &eks.DescribeClusterInput{Name: aws.String(cfg.ClusterName)}) + if err == nil { + return nil // already exists + } + var notFound *ekstypes.ResourceNotFoundException + if !errors.As(err, ¬Found) { + return err + } + _, err = eksClient.CreateCluster(ctx, &eks.CreateClusterInput{ + Name: aws.String(cfg.ClusterName), + RoleArn: aws.String(ClusterRoleARN), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + SubnetIds: subnetIDs, + SecurityGroupIds: []string{controlSG}, + EndpointPublicAccess: aws.Bool(true), + EndpointPrivateAccess: aws.Bool(true), + }, + KubernetesNetworkConfig: 
&ekstypes.KubernetesNetworkConfigRequest{ + ServiceIpv4Cidr: aws.String(cfg.ServiceCIDR), + }, + }) + if err != nil { + return err + } + // Wait for ACTIVE + for { + out, err := eksClient.DescribeCluster(ctx, &eks.DescribeClusterInput{Name: aws.String(cfg.ClusterName)}) + if err != nil { + return err + } + if out.Cluster.Status == ekstypes.ClusterStatusActive { + break + } + time.Sleep(15 * time.Second) + } + return nil +} + +func createManagedNodeGroup( + ctx context.Context, + eksClient *eks.Client, + cfg AwsClusterSetupConfig, + subnetIDs []string, + workerSG string, +) error { + ngName := cfg.ClusterName + "-ng" + _, err := eksClient.DescribeNodegroup(ctx, &eks.DescribeNodegroupInput{ + ClusterName: aws.String(cfg.ClusterName), + NodegroupName: aws.String(ngName), + }) + if err == nil { + return nil // exists + } + var notFound *ekstypes.ResourceNotFoundException + if !errors.As(err, ¬Found) { + return err + } + _, err = eksClient.CreateNodegroup(ctx, &eks.CreateNodegroupInput{ + ClusterName: aws.String(cfg.ClusterName), + NodegroupName: aws.String(ngName), + ScalingConfig: &ekstypes.NodegroupScalingConfig{ + DesiredSize: aws.Int32(cfg.DesiredSize), + MinSize: aws.Int32(cfg.MinSize), + MaxSize: aws.Int32(cfg.MaxSize), + }, + Subnets: subnetIDs, + InstanceTypes: cfg.InstanceTypes, + NodeRole: aws.String(NodeRoleARN), + DiskSize: aws.Int32(20), + }) + if err != nil { + return err + } + for { + out, err := eksClient.DescribeNodegroup(ctx, &eks.DescribeNodegroupInput{ + ClusterName: aws.String(cfg.ClusterName), + NodegroupName: aws.String(ngName), + }) + if err != nil { + return err + } + if out.Nodegroup.Status == ekstypes.NodegroupStatusActive { + break + } + time.Sleep(15 * time.Second) + } + return nil +} + +// ─── ELASTIC IP ALLOCATION ───────────────────────────────────────────────────── + +// ensureElasticIPs reuses unassociated EIPs by tag or allocates new ones +func ensureElasticIPs(ctx context.Context, client *ec2.Client, count int) ([]string, error) { + 
var result []string + + // 1) Find existing unassociated EIPs tagged "cockroachdb-eip-..." + addrsOut, err := client.DescribeAddresses(ctx, &ec2.DescribeAddressesInput{}) + if err != nil { + return nil, err + } + for _, addr := range addrsOut.Addresses { + if addr.AssociationId == nil { + for _, tag := range addr.Tags { + if tag.Key != nil && *tag.Key == "Name" && strings.HasPrefix(*tag.Value, "self-hosted-testing-eip-") { + result = append(result, aws.ToString(addr.AllocationId)) + break + } + } + } + if len(result) >= count { + return result[:count], nil + } + } + + // 2) Allocate remaining count + toAllocate := count - len(result) + for i := 0; i < toAllocate; i++ { + out, err := client.AllocateAddress(ctx, &ec2.AllocateAddressInput{Domain: ec2types.DomainTypeVpc}) + if err != nil { + return nil, err + } + allocID := aws.ToString(out.AllocationId) + // Tag it for future reuse + _, _ = client.CreateTags(ctx, &ec2.CreateTagsInput{ + Resources: []string{allocID}, + Tags: []ec2types.Tag{{Key: aws.String("Name"), Value: aws.String("self-hosted-testing-eip-" + allocID)}}, + }) + result = append(result, allocID) + } + return result, nil +} diff --git a/tests/e2e/operator/infra/azure.go b/tests/e2e/operator/infra/azure.go new file mode 100644 index 000000000..ee76e0bbe --- /dev/null +++ b/tests/e2e/operator/infra/azure.go @@ -0,0 +1,570 @@ +package infra + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/cockroachdb/helm-charts/tests/e2e/coredns" + "github.com/cockroachdb/helm-charts/tests/e2e/operator" + "github.com/gruntwork-io/terratest/modules/k8s" + 
"github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +// --- Azure Constants --- +const ( + azureSubscriptionID = "" // replace with your subscription ID + commonResourceGroup = "cockroachdb-infra-rg" + defaultNodeTagKey = "app" + defaultNodeTagValue = "cockroachdb-node" + defaultK8sVersion = "" // empty = use AKS default +) + +// AzureClusterSetupConfig holds per-cluster parameters +type AzureClusterSetupConfig struct { + Region string // e.g. "centralus", "eastus", "westus" + ResourceGroup string // = commonResourceGroup + VNetName string // = "vnet-" + VNetPrefix string // e.g. "172.28.16.0/16" (unique per-region block) + SubnetName string // = "subnet-" + SubnetPrefix string // e.g. "172.28.16.0/24" + ClusterName string // e.g. "cockroachdb-central" + DNSPrefix string // e.g. "crdb-central" + PodCIDR string // e.g. "10.244.0.0/16" + ServiceCIDR string // e.g. "10.96.0.0/16" + AvailabilityZones []string // e.g. 
{"1","2","3"} +} + +// Helper functions to get network configuration from common.go +func getAzureVNetPrefix(region string) string { + if config, ok := NetworkConfigs[ProviderAzure][region]; ok { + if prefix, ok := config.(map[string]interface{})["VNetPrefix"].(string); ok { + return prefix + } + } + // Fallback defaults if config not found + return "172.28.0.0/16" +} + +func getAzureSubnetPrefix(region string) string { + if config, ok := NetworkConfigs[ProviderAzure][region]; ok { + if prefix, ok := config.(map[string]interface{})["SubnetPrefix"].(string); ok { + return prefix + } + } + // Fallback defaults if config not found + return "172.28.0.0/24" +} + +func getAzurePodCIDR(region string) string { + if config, ok := NetworkConfigs[ProviderAzure][region]; ok { + if cidr, ok := config.(map[string]interface{})["PodCIDR"].(string); ok { + return cidr + } + } + // Fallback defaults if config not found + return "10.244.0.0/16" +} + +func getAzureServiceCIDR(region string) string { + if config, ok := NetworkConfigs[ProviderAzure][region]; ok { + if cidr, ok := config.(map[string]interface{})["ServiceCIDR"].(string); ok { + return cidr + } + } + // Fallback defaults if config not found + return "10.96.0.0/16" +} + +// One entry per cluster/region +var azureClusterSetups = []AzureClusterSetupConfig{ + { + Region: "centralus", + ResourceGroup: commonResourceGroup, + VNetName: "vnet-centralus", + VNetPrefix: getAzureVNetPrefix("centralus"), + SubnetName: "subnet-centralus", + SubnetPrefix: getAzureSubnetPrefix("centralus"), + ClusterName: "cockroachdb-central", + DNSPrefix: "crdb-central", + PodCIDR: getAzurePodCIDR("centralus"), + ServiceCIDR: getAzureServiceCIDR("centralus"), + AvailabilityZones: []string{"1", "2", "3"}, + }, + { + Region: "eastus", + ResourceGroup: commonResourceGroup, + VNetName: "vnet-eastus", + VNetPrefix: getAzureVNetPrefix("eastus"), + SubnetName: "subnet-eastus", + SubnetPrefix: getAzureSubnetPrefix("eastus"), + ClusterName: "cockroachdb-east", 
+ DNSPrefix: "crdb-east", + PodCIDR: getAzurePodCIDR("eastus"), + ServiceCIDR: getAzureServiceCIDR("eastus"), + AvailabilityZones: []string{"1", "2", "3"}, + }, + { + Region: "westus", + ResourceGroup: commonResourceGroup, + VNetName: "vnet-westus", + VNetPrefix: getAzureVNetPrefix("westus"), + SubnetName: "subnet-westus", + SubnetPrefix: getAzureSubnetPrefix("westus"), + ClusterName: "cockroachdb-west", + DNSPrefix: "crdb-west", + PodCIDR: getAzurePodCIDR("westus"), + ServiceCIDR: getAzureServiceCIDR("westus"), + AvailabilityZones: []string{"1", "2", "3"}, + }, +} + +// AzureRegion implements CloudProvider for Azure +type AzureRegion struct { + *operator.Region +} + +// ScaleNodePool scales the node pool in an AKS cluster +func (r *AzureRegion) ScaleNodePool(t *testing.T, location string, nodeCount, index int) { + t.Logf("[%s] Scaling node pool in cluster %s to %d nodes", ProviderAzure, azureClusterSetups[index].ClusterName, nodeCount) + + // In a real implementation, this would use the Azure SDK to scale the node pool + // This would include getting the node pool and updating its count + t.Logf("[%s] Node pool scaling not fully implemented for Azure", ProviderAzure) + + // Uncomment and complete this code when implementing actual scaling: + /* + ctx := context.Background() + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + t.Logf("[%s] Failed to get Azure credentials: %v", ProviderAzure, err) + return + } + + aksClient, err := armcontainerservice.NewManagedClustersClient(azureSubscriptionID, cred, nil) + if err != nil { + t.Logf("[%s] Failed to create AKS client: %v", ProviderAzure, err) + return + } + + // Actual implementation would go here + */ +} + +func (r *AzureRegion) SetUpInfra(t *testing.T) { + if r.ReusingInfra { + t.Logf("[%s] Reusing existing infrastructure", ProviderAzure) + return + } + + t.Logf("[%s] Setting up infrastructure", ProviderAzure) + ctx := context.Background() + cred, err := 
azidentity.NewDefaultAzureCredential(nil) + require.NoError(t, err) + + // Clients + rgClient, err := armresources.NewResourceGroupsClient(azureSubscriptionID, cred, nil) + require.NoError(t, err) + vnetClient, err := armnetwork.NewVirtualNetworksClient(azureSubscriptionID, cred, nil) + require.NoError(t, err) + subnetClient, err := armnetwork.NewSubnetsClient(azureSubscriptionID, cred, nil) + require.NoError(t, err) + nsgClient, err := armnetwork.NewSecurityGroupsClient(azureSubscriptionID, cred, nil) + require.NoError(t, err) + pipClient, err := armnetwork.NewPublicIPAddressesClient(azureSubscriptionID, cred, nil) + require.NoError(t, err) + aksClient, err := armcontainerservice.NewManagedClustersClient(azureSubscriptionID, cred, nil) + require.NoError(t, err) + + // 1) Ensure Resource Group exists + _, err = createOrUpdateRG(ctx, rgClient, commonResourceGroup, azureClusterSetups[0].Region) + require.NoError(t, err) + + // Prepare map for CoreDNS options + r.CorednsClusterOptions = make(map[string]coredns.CoreDNSClusterOption) + + // 2) Loop over each cluster config + for i, cfg := range azureClusterSetups[:len(r.Clusters)] { + // Update cluster name to match the one in r.Clusters + cfg.ClusterName = r.Clusters[i] + + t.Logf("[%s] Setting up cluster %s in region %s", ProviderAzure, cfg.ClusterName, cfg.Region) + + // 2a) Create or reuse VNet in this region + _, err := createOrUpdateVNet(ctx, vnetClient, cfg.ResourceGroup, cfg.VNetName, cfg.Region, cfg.VNetPrefix) + require.NoError(t, err) + + // 2b) Create or reuse Subnet inside that VNet + subnetResp, err := createOrUpdateSubnet(ctx, subnetClient, cfg.ResourceGroup, cfg.VNetName, cfg.SubnetName, cfg.SubnetPrefix) + require.NoError(t, err) + + // 2c) Create or reuse NSG and associate it to the Subnet + nsgName := cfg.SubnetName + "-nsg" + nsgRules := []armnetwork.SecurityRule{ + { + Name: ToPtr("AllowWebhook9443"), + Properties: &armnetwork.SecurityRulePropertiesFormat{ + Protocol: 
ToPtr(armnetwork.SecurityRuleProtocolTCP), + SourceAddressPrefix: ToPtr("VirtualNetwork"), + SourcePortRange: ToPtr("*"), + DestinationAddressPrefix: ToPtr("*"), + DestinationPortRange: ToPtr("9443"), + Access: ToPtr(armnetwork.SecurityRuleAccessAllow), + Priority: ToPtr[int32](100), + Direction: ToPtr(armnetwork.SecurityRuleDirectionInbound), + }, + }, + { + Name: ToPtr("AllowVNetInternal"), + Properties: &armnetwork.SecurityRulePropertiesFormat{ + Protocol: ToPtr(armnetwork.SecurityRuleProtocolAsterisk), + SourceAddressPrefix: ToPtr("VirtualNetwork"), + SourcePortRange: ToPtr("*"), + DestinationAddressPrefix: ToPtr("VirtualNetwork"), + DestinationPortRange: ToPtr("*"), + Access: ToPtr(armnetwork.SecurityRuleAccessAllow), + Priority: ToPtr[int32](110), + Direction: ToPtr(armnetwork.SecurityRuleDirectionInbound), + }, + }, + } + nsgResp, err := createOrUpdateNSG(ctx, nsgClient, cfg.ResourceGroup, nsgName, cfg.Region, nsgRules) + require.NoError(t, err) + err = associateNSG(ctx, subnetClient, cfg.ResourceGroup, cfg.VNetName, cfg.SubnetName, *nsgResp.ID) + require.NoError(t, err) + + // 2d) Create or reuse a Public IP for CoreDNS LB in this region + pipName := fmt.Sprintf("coredns-pip-%s", cfg.Region) + pipResp, err := ensurePublicIP(ctx, pipClient, cfg.ResourceGroup, cfg.Region, pipName) + require.NoError(t, err) + + // 2e) Create or reuse AKS cluster + _, err = createOrUpdateAKS(ctx, aksClient, cfg, *subnetResp.ID) + require.NoError(t, err) + + // 2f) Update kubeconfig via Azure CLI + err = UpdateKubeconfigAzure(t, cfg.ResourceGroup, cfg.ClusterName) + require.NoError(t, err, "failed to get-credentials for %s", cfg.ClusterName) + + // 2g) Prepare CoreDNS options (store static IP) + r.CorednsClusterOptions[operator.CustomDomains[i]] = coredns.CoreDNSClusterOption{ + IPs: []string{*pipResp.Properties.IPAddress}, + Namespace: fmt.Sprintf("%s-%s", operator.Namespace, strings.ToLower(random.UniqueId())), + Domain: operator.CustomDomains[i], + } + } + + // 3) Deploy 
CoreDNS to each AKS cluster + kubeConfigPath, err := k8s.KubeConfigPathFromHomeDirE() + require.NoError(t, err) + + for i, cfg := range azureClusterSetups[:len(r.Clusters)] { + // 3a) Deploy CoreDNS with proper configuration + pipName := fmt.Sprintf("coredns-pip-%s", cfg.Region) + annotations := GetLoadBalancerAnnotations(ProviderAzure) + + // Add Azure-specific annotations + annotations["service.beta.kubernetes.io/azure-pip-name"] = pipName + annotations["service.beta.kubernetes.io/azure-load-balancer-resource-group"] = cfg.ResourceGroup + + ips := r.CorednsClusterOptions[operator.CustomDomains[i]].IPs + var staticIP *string + if len(ips) > 0 { + staticIP = ToPtr(ips[0]) + } + + err := DeployCoreDNS(t, cfg.ClusterName, kubeConfigPath, staticIP, ProviderAzure, operator.CustomDomains[i], r.CorednsClusterOptions) + require.NoError(t, err, "failed to deploy CoreDNS to cluster %s", cfg.ClusterName) + } + + // 4) Build client map for operator + r.Clients = make(map[string]client.Client) + for _, cfg := range azureClusterSetups[:len(r.Clusters)] { + cfgRest, err := config.GetConfigWithContext(cfg.ClusterName) + require.NoError(t, err) + c, err := client.New(cfgRest, client.Options{}) + require.NoError(t, err) + r.Clients[cfg.ClusterName] = c + } + + r.ReusingInfra = true + t.Logf("[%s] Infrastructure setup completed", ProviderAzure) +} + +// TeardownInfra deletes AKS clusters, Public IPs, Subnets, VNets, and Resource Group +func (r *AzureRegion) TeardownInfra(t *testing.T) { + t.Logf("[%s] Starting infrastructure teardown", ProviderAzure) + ctx := context.Background() + cred, err := azidentity.NewDefaultAzureCredential(nil) + require.NoError(t, err) + + rgClient, _ := armresources.NewResourceGroupsClient(azureSubscriptionID, cred, nil) + vnetClient, _ := armnetwork.NewVirtualNetworksClient(azureSubscriptionID, cred, nil) + subnetClient, _ := armnetwork.NewSubnetsClient(azureSubscriptionID, cred, nil) + nsgClient, _ := 
armnetwork.NewSecurityGroupsClient(azureSubscriptionID, cred, nil)
	pipClient, _ := armnetwork.NewPublicIPAddressesClient(azureSubscriptionID, cred, nil)
	aksClient, _ := armcontainerservice.NewManagedClustersClient(azureSubscriptionID, cred, nil)

	// 1) Delete AKS clusters. BUG FIX: apply the same cluster-name override
	// SetUpInfra uses, so the clusters created for r.Clusters are the ones
	// deleted (previously the default names from azureClusterSetups were used).
	for i, cfg := range azureClusterSetups[:len(r.Clusters)] {
		cfg.ClusterName = r.Clusters[i]
		t.Logf("[%s] Deleting AKS cluster '%s'", ProviderAzure, cfg.ClusterName)
		_, err := aksClient.BeginDelete(ctx, cfg.ResourceGroup, cfg.ClusterName, nil)
		if err != nil && !IsResourceNotFound(err) {
			t.Logf("[%s] Warning: error deleting AKS %s: %v", ProviderAzure, cfg.ClusterName, err)
		}
	}

	// 2) Delete Public IPs
	for _, cfg := range azureClusterSetups[:len(r.Clusters)] {
		pipName := fmt.Sprintf("coredns-pip-%s", cfg.Region)
		t.Logf("[%s] Deleting Public IP '%s'", ProviderAzure, pipName)
		_, err := pipClient.BeginDelete(ctx, cfg.ResourceGroup, pipName, nil)
		if err != nil && !IsResourceNotFound(err) {
			t.Logf("[%s] Warning: error deleting PublicIP %s: %v", ProviderAzure, pipName, err)
		}
	}

	// 3) Delete NSGs
	for _, cfg := range azureClusterSetups[:len(r.Clusters)] {
		nsgName := cfg.SubnetName + "-nsg"
		t.Logf("[%s] Deleting NSG '%s'", ProviderAzure, nsgName)
		_, err := nsgClient.BeginDelete(ctx, cfg.ResourceGroup, nsgName, nil)
		if err != nil && !IsResourceNotFound(err) {
			t.Logf("[%s] Warning: error deleting NSG %s: %v", ProviderAzure, nsgName, err)
		}
	}

	// 4) Delete Subnets
	for _, cfg := range azureClusterSetups[:len(r.Clusters)] {
		t.Logf("[%s] Deleting Subnet '%s'", ProviderAzure, cfg.SubnetName)
		_, err := subnetClient.BeginDelete(ctx, cfg.ResourceGroup, cfg.VNetName, cfg.SubnetName, nil)
		if err != nil && !IsResourceNotFound(err) {
			t.Logf("[%s] Warning: error deleting Subnet %s: %v", ProviderAzure, cfg.SubnetName, err)
		}
	}

	// 5) Delete VNets
	for _, cfg := range azureClusterSetups[:len(r.Clusters)] {
		t.Logf("[%s] Deleting VNet '%s'", ProviderAzure, cfg.VNetName)
		_, err := vnetClient.BeginDelete(ctx, cfg.ResourceGroup, cfg.VNetName, nil)
		if err != nil && !IsResourceNotFound(err) {
			t.Logf("[%s] Warning: error deleting VNet %s: %v", ProviderAzure, cfg.VNetName, err)
		}
	}

	// 6) Delete Resource Group (and everything inside)
	t.Logf("[%s] Deleting Resource Group '%s'", ProviderAzure, commonResourceGroup)
	_, err = rgClient.BeginDelete(ctx, commonResourceGroup, nil)
	if err != nil && !IsResourceNotFound(err) {
		t.Logf("[%s] Warning: error deleting ResourceGroup %s: %v", ProviderAzure, commonResourceGroup, err)
	}

	t.Logf("[%s] Infrastructure teardown completed", ProviderAzure)
}

// --- Helper Functions ---

// isAzureNotFound reports whether err represents an ARM 404 (resource absent).
// BUG FIX: the previous inline checks treated *any* azcore.ResponseError
// (403, 429, 500, ...) as "not found", so real failures fell through to a
// creation attempt instead of being surfaced. The string fallback covers SDK
// errors that are not *azcore.ResponseError.
func isAzureNotFound(err error) bool {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		return respErr.StatusCode == 404
	}
	return strings.Contains(strings.ToLower(err.Error()), "notfound")
}

// createOrUpdateRG creates the resource group in location when absent. It
// returns (nil, nil) when the group already exists (callers ignore the value).
func createOrUpdateRG(ctx context.Context, client *armresources.ResourceGroupsClient, name, location string) (*armresources.ResourceGroup, error) {
	_, err := client.Get(ctx, name, nil)
	if err == nil {
		return nil, nil
	}
	if !isAzureNotFound(err) {
		return nil, err
	}
	resp, err := client.CreateOrUpdate(ctx, name, armresources.ResourceGroup{Location: ToPtr(location)}, nil)
	if err != nil {
		return nil, err
	}
	return &resp.ResourceGroup, nil
}

// createOrUpdateVNet creates the VNet with the given address prefix when
// absent. Returns (nil, nil) when it already exists (callers ignore the value).
func createOrUpdateVNet(ctx context.Context, client *armnetwork.VirtualNetworksClient, rg, name, location, prefix string) (*armnetwork.VirtualNetwork, error) {
	_, err := client.Get(ctx, rg, name, nil)
	if err == nil {
		return nil, nil
	}
	if !isAzureNotFound(err) {
		return nil, err
	}
	poller, err := client.BeginCreateOrUpdate(ctx, rg, name, armnetwork.VirtualNetwork{
		Location: ToPtr(location),
		Properties: &armnetwork.VirtualNetworkPropertiesFormat{
			AddressSpace: &armnetwork.AddressSpace{AddressPrefixes: []*string{ToPtr(prefix)}},
		},
	}, nil)
	if err != nil {
		return nil, err
	}
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &resp.VirtualNetwork, nil
}

// createOrUpdateSubnet returns the existing subnet or creates it with the
// given address prefix.
func createOrUpdateSubnet(ctx context.Context, client *armnetwork.SubnetsClient, rg, vnet, name, prefix string) (*armnetwork.Subnet, error) {
	respOld, err := client.Get(ctx, rg, vnet, name, nil)
	if err == nil {
		return &respOld.Subnet, nil
	}
	if !isAzureNotFound(err) {
		return nil, err
	}
	poller, err := client.BeginCreateOrUpdate(ctx, rg, vnet, name, armnetwork.Subnet{
		Properties: &armnetwork.SubnetPropertiesFormat{
			AddressPrefix: ToPtr(prefix),
		},
	}, nil)
	if err != nil {
		return nil, err
	}
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &resp.Subnet, nil
}

// createOrUpdateNSG returns the existing network security group or creates it
// with the given rules.
func createOrUpdateNSG(ctx context.Context, client *armnetwork.SecurityGroupsClient, rg, name, location string, rules []armnetwork.SecurityRule) (*armnetwork.SecurityGroup, error) {
	respOld, err := client.Get(ctx, rg, name, nil)
	if err == nil {
		return &respOld.SecurityGroup, nil
	}
	if !isAzureNotFound(err) {
		return nil, err
	}
	poller, err := client.BeginCreateOrUpdate(ctx, rg, name, armnetwork.SecurityGroup{
		Location:   ToPtr(location),
		Properties: &armnetwork.SecurityGroupPropertiesFormat{SecurityRules: Pointers(rules)},
	}, nil)
	if err != nil {
		return nil, err
	}
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &resp.SecurityGroup, nil
}

// associateNSG attaches the NSG to the subnet while preserving the subnet's
// other properties.
func associateNSG(ctx context.Context, client *armnetwork.SubnetsClient, rg, vnet, subnetName, nsgID string) error {
	subnetResp, err := client.Get(ctx, rg, vnet, subnetName, nil)
	if err != nil {
		return err
	}
	subnet := subnetResp.Subnet
	subnet.Properties.NetworkSecurityGroup = &armnetwork.SecurityGroup{ID: ToPtr(nsgID)}
	poller, err :=
client.BeginCreateOrUpdate(ctx, rg, vnet, subnetName, armnetwork.Subnet{
		Properties: subnet.Properties,
	}, nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}

// ensurePublicIP returns the existing public IP or creates a Standard-SKU
// static address in the given region.
func ensurePublicIP(ctx context.Context, client *armnetwork.PublicIPAddressesClient, rg, region, name string) (*armnetwork.PublicIPAddress, error) {
	respOld, err := client.Get(ctx, rg, name, nil)
	if err == nil {
		return &respOld.PublicIPAddress, nil
	}
	// Proceed to create only on a genuine 404; any other ARM error (403, 500,
	// ...) is returned. The previous check accepted every *azcore.ResponseError
	// as "not found".
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		if respErr.StatusCode != 404 {
			return nil, err
		}
	} else if !strings.Contains(strings.ToLower(err.Error()), "notfound") {
		return nil, err
	}
	poller, err := client.BeginCreateOrUpdate(ctx, rg, name, armnetwork.PublicIPAddress{
		Location: ToPtr(region),
		Properties: &armnetwork.PublicIPAddressPropertiesFormat{
			PublicIPAllocationMethod: ToPtr(armnetwork.IPAllocationMethodStatic),
		},
		SKU:  &armnetwork.PublicIPAddressSKU{Name: ToPtr(armnetwork.PublicIPAddressSKUNameStandard)},
		Tags: map[string]*string{defaultNodeTagKey: ToPtr(defaultNodeTagValue)},
	}, nil)
	if err != nil {
		return nil, err
	}
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &resp.PublicIPAddress, nil
}

// createOrUpdateAKS creates the AKS cluster described by cfg, returning
// (nil, nil) when it already exists (the caller ignores the value).
func createOrUpdateAKS(ctx context.Context, client *armcontainerservice.ManagedClustersClient, cfg AzureClusterSetupConfig, subnetID string) (*armcontainerservice.ManagedCluster, error) {
	_, err := client.Get(ctx, cfg.ResourceGroup, cfg.ClusterName, nil)
	if err == nil {
		return nil, nil
	}
	// Same 404-only check as ensurePublicIP: do not treat arbitrary ARM errors
	// as "cluster absent".
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		if respErr.StatusCode != 404 {
			return nil, err
		}
	} else if !strings.Contains(strings.ToLower(err.Error()), "notfound") {
		return nil, err
	}
	agentPool := armcontainerservice.ManagedClusterAgentPoolProfile{
		Name:               ToPtr("agentpool"),
		Count:              ToPtr[int32](DefaultNodeCount),
		VMSize:             ToPtr(AzureDefaultVMSize),
		OSDiskSizeGB:       ToPtr[int32](128),
		OSType:             ToPtr(armcontainerservice.OSTypeLinux),
		Mode:               ToPtr(armcontainerservice.AgentPoolModeSystem),
		Type:               ToPtr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets),
		VnetSubnetID:       ToPtr(subnetID),
		AvailabilityZones:  Pointers(cfg.AvailabilityZones),
		EnableNodePublicIP: ToPtr(false),
	}
	networkProfile := &armcontainerservice.NetworkProfile{
		NetworkPlugin: ToPtr(armcontainerservice.NetworkPluginAzure),
		// NOTE(review): podCidr is a kubenet/overlay concept; confirm the
		// "azure" plugin accepts it for this API version.
		PodCidr:      ToPtr(cfg.PodCIDR),
		ServiceCidr:  ToPtr(cfg.ServiceCIDR),
		DNSServiceIP: calcDNSIP(cfg.ServiceCIDR),
	}
	props := &armcontainerservice.ManagedClusterProperties{
		DNSPrefix:              ToPtr(cfg.DNSPrefix),
		AgentPoolProfiles:      []*armcontainerservice.ManagedClusterAgentPoolProfile{&agentPool},
		NetworkProfile:         networkProfile,
		APIServerAccessProfile: &armcontainerservice.ManagedClusterAPIServerAccessProfile{AuthorizedIPRanges: []*string{ToPtr("0.0.0.0/0")}},
		EnableRBAC:             ToPtr(true),
		AutoUpgradeProfile:     &armcontainerservice.ManagedClusterAutoUpgradeProfile{UpgradeChannel: ToPtr(armcontainerservice.UpgradeChannelPatch)},
	}
	// BUG FIX: the old code used cfg.ServiceCIDR as a KubernetesVersion
	// "placeholder", so with defaultK8sVersion == "" the cluster was submitted
	// with a CIDR string as its Kubernetes version. Only pin the version when
	// one is explicitly configured; otherwise omit it and let AKS choose its
	// default.
	if defaultK8sVersion != "" {
		props.KubernetesVersion = ToPtr(defaultK8sVersion)
	}
	managedCluster := armcontainerservice.ManagedCluster{
		Location:   ToPtr(cfg.Region),
		Properties: props,
		Tags:       map[string]*string{defaultNodeTagKey: ToPtr(defaultNodeTagValue)},
	}
	poller, err := client.BeginCreateOrUpdate(ctx, cfg.ResourceGroup, cfg.ClusterName, managedCluster, nil)
	if err != nil {
		return nil, err
	}
	resp, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &resp.ManagedCluster, nil
}

// calcDNSIP derives the conventional ".10" DNS service IP from the service
// CIDR base (e.g. "10.96.0.0/16" -> "10.96.0.10"). Returns nil when the input
// is not a dotted-quad IPv4 CIDR.
func calcDNSIP(serviceCIDR string) *string {
	octets := strings.Split(strings.Split(serviceCIDR, "/")[0], ".")
	if len(octets) != 4 {
		return nil
	}
	// BUG FIX: use the CIDR's own third octet; the previous hard-coded "0"
	// produced an address outside the service range for CIDRs such as
	// 10.0.128.0/20.
	return ToPtr(fmt.Sprintf("%s.%s.%s.10", octets[0], octets[1], octets[2]))
}
diff --git a/tests/e2e/operator/infra/kind.go b/tests/e2e/operator/infra/kind.go
new file mode 100644
index 000000000..df03db5f4
--- /dev/null
+++ b/tests/e2e/operator/infra/kind.go
@@ -0,0 +1,39 @@
+package infra + +import ( + "testing" + + "github.com/cockroachdb/helm-charts/tests/e2e/operator" +) + +type Provider interface { + SetUpInfra(t *testing.T) + TeardownInfra(t *testing.T) + ScaleNodePool(t *testing.T, location string, nodeCount, index int) +} + +// KindRegion implements CloudProvider for Kind +type KindRegion struct { + *operator.Region +} + +// SetUpInfra Creates Kind clusters, deploy calico CNI, deploy coredns in each cluster. +func (r *KindRegion) SetUpInfra(t *testing.T) { + t.Logf("[%s] Kind setup not fully implemented", ProviderKind) +} + +// TeardownInfra cleans up all resources created by SetUpInfra +func (r *KindRegion) TeardownInfra(t *testing.T) { + t.Logf("[%s] Kind teardown not implemented - clusters will be cleaned up by the test framework", ProviderKind) +} + +// ScaleNodePool scales the node pool in a Kind cluster +func (r *KindRegion) ScaleNodePool(t *testing.T, location string, nodeCount, index int) { + t.Logf("[%s] Kind scaling not implemented - Kind doesn't support scaling node pools", ProviderKind) +} + +/* + Try running everything in a different go routine. + Follow the same API's, struct for each infra. + Each infra should just have the additional details needed. +*/ diff --git a/tests/e2e/operator/infra/provider.go b/tests/e2e/operator/infra/provider.go index 33a6124c7..10f306821 100644 --- a/tests/e2e/operator/infra/provider.go +++ b/tests/e2e/operator/infra/provider.go @@ -11,13 +11,15 @@ import ( // can implement them as no-ops with appropriate logging. 
type CloudProvider interface { // SetUpInfra creates the necessary infrastructure for the tests + // This is the only required method for all providers SetUpInfra(t *testing.T) // TeardownInfra cleans up all resources created by SetUpInfra + // Optional: providers that don't support teardown can implement as no-op TeardownInfra(t *testing.T) // ScaleNodePool scales the node pool in a cluster - // Optional: providers that don't support scaling/ if auto-scaling is enabled can implement as no-op + // Optional: providers that don't support scaling/if auto-scaling is enabled can implement as no-op ScaleNodePool(t *testing.T, location string, nodeCount, index int) // CanScale checks if the provider supports scaling. @@ -31,10 +33,22 @@ func ProviderFactory(providerType string, region *operator.Region) CloudProvider provider := K3dRegion{Region: region} provider.RegionCodes = GetRegionCodes(providerType) return &provider + case ProviderKind: + provider := KindRegion{Region: region} + provider.RegionCodes = GetRegionCodes(providerType) + return &provider case ProviderGCP: provider := GcpRegion{Region: region} provider.RegionCodes = GetRegionCodes(providerType) return &provider + case ProviderAzure: + provider := AzureRegion{Region: region} + provider.RegionCodes = GetRegionCodes(providerType) + return &provider + case ProviderAWS: + provider := AwsRegion{Region: region} + provider.RegionCodes = GetRegionCodes(providerType) + return &provider default: return nil }