diff --git a/.golangci.yaml b/.golangci.yaml index bb013d5e..94ac271b 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -10,6 +10,7 @@ linters: - goconst - forbidigo - predeclared + - gochecknoglobals linters-settings: revive: rules: @@ -35,6 +36,11 @@ issues: exclude-dirs: - pkg/generated/* - clients/rancher/generated/* + exclude-rules: + # Scheme and v3 schemas are being skipped due to the complexity of updating the global vars used in Shepherd clients + - path: pkg/* + linters: + - gochecknoglobals exclude-files: - ^*\.yaml$ - ^*\.yml$ diff --git a/clients/dynamic/dynamic.go b/clients/dynamic/dynamic.go index b46ec3ec..6e343397 100644 --- a/clients/dynamic/dynamic.go +++ b/clients/dynamic/dynamic.go @@ -72,20 +72,18 @@ type ResourceClient struct { ts *session.Session } -var ( +func needsCleanup(obj *unstructured.Unstructured) bool { // some GVKs are special and cannot be cleaned up because they do not exist // after being created (eg: SelfSubjectAccessReview). We'll not register // cleanup functions when creating objects of these kinds. - noCleanupGVKs = []schema.GroupVersionKind{ + noCleanupGVKs := []schema.GroupVersionKind{ { Group: "authorization.k8s.io", Version: "v1", Kind: "SelfSubjectAccessReview", }, } -) -func needsCleanup(obj *unstructured.Unstructured) bool { for _, gvk := range noCleanupGVKs { if obj.GroupVersionKind() == gvk { return false diff --git a/clients/helm/helm.go b/clients/helm/helm.go index 43a1add2..b6577a37 100644 --- a/clients/helm/helm.go +++ b/clients/helm/helm.go @@ -7,7 +7,7 @@ import ( "github.com/rancher/shepherd/pkg/session" ) -var helmCmd = "helm_v3" +const helmCmd = "helm_v3" // InstallChart installs a helm chart using helm CLI. 
// Send the helm set command strings such as "--set", "installCRDs=true" diff --git a/clients/k3d/k3d.go b/clients/k3d/k3d.go index d3b4439d..7f4dd209 100644 --- a/clients/k3d/k3d.go +++ b/clients/k3d/k3d.go @@ -9,7 +9,9 @@ import ( apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/session" "github.com/rancher/shepherd/pkg/wait" @@ -21,8 +23,6 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -var importTimeout = int64(60 * 20) - // CreateK3DCluster creates a minimal k3d cluster and returns a rest config for connecting to the newly created cluster. // If a name is not given a random one will be generated. 
func CreateK3DCluster(ts *session.Session, name, hostname string, servers, agents int) (*rest.Config, error) { @@ -102,10 +102,10 @@ func CreateAndImportK3DCluster(client *rancher.Client, name, image, hostname str cluster := &apisV1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: "fleet-default", + Namespace: namespaces.Fleet, }, } - clusterObj, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).Create(cluster) + clusterObj, err := client.Steve.SteveType(stevetypes.Provisioning).Create(cluster) if err != nil { return nil, errors.Wrap(err, "CreateAndImportK3DCluster: failed to create provisioning cluster") } @@ -114,7 +114,7 @@ func CreateAndImportK3DCluster(client *rancher.Client, name, image, hostname str logrus.Infof("Creating K3D cluster...") downRest, err := CreateK3DCluster(client.Session, name, hostname, servers, agents) if err != nil { - _ = client.Steve.SteveType(clusters.ProvisioningSteveResourceType).Delete(clusterObj) + _ = client.Steve.SteveType(stevetypes.Provisioning).Delete(clusterObj) return nil, errors.Wrap(err, "CreateAndImportK3DCluster: failed to create k3d cluster") } @@ -132,9 +132,9 @@ func CreateAndImportK3DCluster(client *rancher.Client, name, image, hostname str } // wait for the provisioning cluster logrus.Infof("Waiting for provisioning cluster...") - clusterWatch, err := kubeProvisioningClient.Clusters("fleet-default").Watch(context.TODO(), metav1.ListOptions{ + clusterWatch, err := kubeProvisioningClient.Clusters(namespaces.Fleet).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return nil, errors.Wrap(err, "CreateAndImportK3DCluster: failed to watch for the imported cluster") @@ -144,7 +144,7 @@ func CreateAndImportK3DCluster(client *rancher.Client, name, image, hostname str err = wait.WatchWait(clusterWatch, func(event watch.Event) 
(bool, error) { cluster := event.Object.(*apisV1.Cluster) if cluster.Name == name { - impCluster, err = kubeProvisioningClient.Clusters("fleet-default").Get(context.TODO(), name, metav1.GetOptions{}) + impCluster, err = kubeProvisioningClient.Clusters(namespaces.Fleet).Get(context.TODO(), name, metav1.GetOptions{}) return true, err } @@ -164,9 +164,9 @@ func CreateAndImportK3DCluster(client *rancher.Client, name, image, hostname str // wait for the imported cluster to be ready logrus.Infof("Waiting for imported cluster...") - clusterWatch, err = kubeProvisioningClient.Clusters("fleet-default").Watch(context.TODO(), metav1.ListOptions{ + clusterWatch, err = kubeProvisioningClient.Clusters(namespaces.Fleet).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + name, - TimeoutSeconds: &importTimeout, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.TwentyMinute), }) checkFunc := clusters.IsImportedClusterReady diff --git a/clients/rancher/catalog/clusterrepo.go b/clients/rancher/catalog/clusterrepo.go index 94ffd7f7..b3985d87 100644 --- a/clients/rancher/catalog/clusterrepo.go +++ b/clients/rancher/catalog/clusterrepo.go @@ -14,8 +14,6 @@ import ( ) const ( - ClusterRepoSteveResourceType = "catalog.cattle.io.clusterrepo" - action = "action" chartsURL = "v1/catalog.cattle.io.clusterrepos/" link = "link" diff --git a/clients/rkecli/state.go b/clients/rkecli/state.go index b6c1f15c..c209478a 100644 --- a/clients/rkecli/state.go +++ b/clients/rkecli/state.go @@ -12,7 +12,7 @@ import ( "github.com/rancher/shepherd/clients/rancher" v3 "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/configmaps" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/file" "gopkg.in/yaml.v2" @@ -169,7 +169,7 @@ func NewStateFile(state *cluster.FullState, dirName string) 
(stateFilePath strin // GetFullState is a function that gets RKE full state from "full-cluster-state" configmap. // And returns the cluster full state. func GetFullState(client *rancher.Client) (state *cluster.FullState, err error) { - namespacedConfigmapClient := client.Steve.SteveType(configmaps.ConfigMapSteveType).NamespacedSteveClient(cluster.SystemNamespace) + namespacedConfigmapClient := client.Steve.SteveType(stevetypes.Configmap).NamespacedSteveClient(cluster.SystemNamespace) if err != nil { return } diff --git a/extensions/charts/awsoutoftree.go b/extensions/charts/awsoutoftree.go index 9177c67b..4fe7fd18 100644 --- a/extensions/charts/awsoutoftree.go +++ b/extensions/charts/awsoutoftree.go @@ -9,17 +9,15 @@ import ( "github.com/rancher/shepherd/clients/rancher" steveV1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/workloads/pods" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" kwait "k8s.io/apimachinery/pkg/util/wait" ) const ( - repoType = "catalog.cattle.io.clusterrepo" - appsType = "catalog.cattle.io.apps" awsUpstreamCloudProviderRepo = "https://github.com/kubernetes/cloud-provider-aws.git" masterBranch = "master" AwsUpstreamChartName = "aws-cloud-controller-manager" - kubeSystemNamespace = "kube-system" ) // InstallAWSOutOfTreeChart installs the CSI chart for aws cloud provider in a given cluster. 
@@ -37,12 +35,12 @@ func InstallAWSOutOfTreeChart(client *rancher.Client, installOptions *InstallOpt awsChartInstallActionPayload := &payloadOpts{ InstallOptions: *installOptions, Name: AwsUpstreamChartName, - Namespace: kubeSystemNamespace, + Namespace: namespaces.KubeSystem, Host: serverSetting.Value, DefaultRegistry: registrySetting.Value, } - chartInstallAction := awsChartInstallAction(awsChartInstallActionPayload, repoName, kubeSystemNamespace, installOptions.ProjectID, isLeaderMigration) + chartInstallAction := awsChartInstallAction(awsChartInstallActionPayload, repoName, namespaces.KubeSystem, installOptions.ProjectID, isLeaderMigration) catalogClient, err := client.GetClusterCatalogClient(installOptions.Cluster.ID) if err != nil { @@ -54,7 +52,7 @@ func InstallAWSOutOfTreeChart(client *rancher.Client, installOptions *InstallOpt return err } - err = VerifyChartInstall(catalogClient, kubeSystemNamespace, AwsUpstreamChartName) + err = VerifyChartInstall(catalogClient, namespaces.KubeSystem, AwsUpstreamChartName) if err != nil { return err } @@ -67,7 +65,7 @@ func InstallAWSOutOfTreeChart(client *rancher.Client, installOptions *InstallOpt chartNodeSelector := map[string]string{ "node-role.kubernetes.io/controlplane": "true", } - err = updateHelmNodeSelectors(steveclient, kubeSystemNamespace, AwsUpstreamChartName, chartNodeSelector) + err = updateHelmNodeSelectors(steveclient, namespaces.KubeSystem, AwsUpstreamChartName, chartNodeSelector) return err } @@ -240,7 +238,7 @@ func awsChartInstallAction(awsChartInstallActionPayload *payloadOpts, repoName, // upstream bug in helm charts, where you can't override the nodeSelector during a deployment of an upstream chart. 
func updateHelmNodeSelectors(client *steveV1.Client, daemonsetNamespace, daemonsetName string, newNodeSelector map[string]string) error { err := kwait.Poll(1*time.Second, 1*time.Minute, func() (done bool, err error) { - _, err = client.SteveType(pods.DaemonsetSteveType).ByID(daemonsetNamespace + "/" + daemonsetName) + _, err = client.SteveType(stevetypes.Daemonset).ByID(daemonsetNamespace + "/" + daemonsetName) if err != nil { return false, nil } @@ -250,7 +248,7 @@ func updateHelmNodeSelectors(client *steveV1.Client, daemonsetNamespace, daemons return err } - steveDaemonset, err := client.SteveType(pods.DaemonsetSteveType).ByID(daemonsetNamespace + "/" + daemonsetName) + steveDaemonset, err := client.SteveType(stevetypes.Daemonset).ByID(daemonsetNamespace + "/" + daemonsetName) if err != nil { return err } @@ -263,6 +261,6 @@ func updateHelmNodeSelectors(client *steveV1.Client, daemonsetNamespace, daemons daemonsetObject.Spec.Template.Spec.NodeSelector = newNodeSelector - _, err = client.SteveType(pods.DaemonsetSteveType).Update(steveDaemonset, daemonsetObject) + _, err = client.SteveType(stevetypes.Daemonset).Update(steveDaemonset, daemonsetObject) return err } diff --git a/extensions/charts/charts.go b/extensions/charts/charts.go index 59fe6444..40180ea2 100644 --- a/extensions/charts/charts.go +++ b/extensions/charts/charts.go @@ -8,9 +8,10 @@ import ( "github.com/rancher/shepherd/clients/rancher" steveV1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/defaults" - "github.com/rancher/shepherd/extensions/kubeapi/workloads/daemonsets" - "github.com/rancher/shepherd/extensions/kubeapi/workloads/deployments" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" 
"github.com/rancher/shepherd/pkg/api/scheme" "github.com/rancher/shepherd/pkg/wait" appv1 "k8s.io/api/apps/v1" @@ -26,7 +27,6 @@ const ( // serverURLSettingID is a private constant string that contains the ID of server URL setting. serverURLSettingID = "server-url" rancherChartsName = "rancher-charts" - active = "active" ) // InstallOptions is a struct of the required options to install a chart. @@ -134,7 +134,7 @@ func WatchAndWaitDeployments(client *rancher.Client, clusterID, namespace string if err != nil { return err } - adminDeploymentResource := adminDynamicClient.Resource(deployments.DeploymentGroupVersionResource).Namespace(namespace) + adminDeploymentResource := adminDynamicClient.Resource(groupversionresources.Deployment()).Namespace(namespace) deployments, err := adminDeploymentResource.List(context.TODO(), listOptions) if err != nil { @@ -156,7 +156,7 @@ func WatchAndWaitDeployments(client *rancher.Client, clusterID, namespace string for _, deployment := range deploymentList { watchAppInterface, err := adminDeploymentResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + deployment.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -192,11 +192,11 @@ func WatchAndWaitDeploymentForAnnotation(client *rancher.Client, clusterID, name if err != nil { return err } - adminDeploymentResource := adminDynamicClient.Resource(deployments.DeploymentGroupVersionResource).Namespace(namespace) + adminDeploymentResource := adminDynamicClient.Resource(groupversionresources.Deployment()).Namespace(namespace) watchAppInterface, err := adminDeploymentResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + deploymentName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -234,7 +234,7 @@ func WatchAndWaitDaemonSets(client 
*rancher.Client, clusterID, namespace string, if err != nil { return err } - adminDaemonSetResource := adminDynamicClient.Resource(daemonsets.DaemonSetGroupVersionResource).Namespace(namespace) + adminDaemonSetResource := adminDynamicClient.Resource(groupversionresources.Daemonset()).Namespace(namespace) daemonSets, err := adminDaemonSetResource.List(context.TODO(), listOptions) if err != nil { @@ -256,7 +256,7 @@ func WatchAndWaitDaemonSets(client *rancher.Client, clusterID, namespace string, for _, daemonSet := range daemonSetList { watchAppInterface, err := adminDaemonSetResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + daemonSet.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -314,7 +314,7 @@ func WatchAndWaitStatefulSets(client *rancher.Client, clusterID, namespace strin for _, statefulSet := range statefulSetList { watchAppInterface, err := adminStatefulSetResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + statefulSet.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -351,20 +351,20 @@ func CreateChartRepoFromGithub(client *steveV1.Client, githubURL, githubBranch, InsecureSkipTLSverify: true, }, } - _, err := client.SteveType(repoType).Create(repoObject) + _, err := client.SteveType(stevetypes.ClusterRepo).Create(repoObject) if err != nil { return err } err = kwait.Poll(1*time.Second, 2*time.Minute, func() (done bool, err error) { - res, err := client.SteveType(repoType).List(nil) + res, err := client.SteveType(stevetypes.ClusterRepo).List(nil) if err != nil { return false, err } for _, repo := range res.Data { if repo.Name == repoName { - if repo.State.Name == active { + if repo.State.Name == states.Active { return true, nil } } diff --git a/extensions/charts/payloads.go 
b/extensions/charts/payloads.go index 4bd4b9ad..86980a00 100644 --- a/extensions/charts/payloads.go +++ b/extensions/charts/payloads.go @@ -4,6 +4,7 @@ import ( "time" v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/extensions/defaults/annotations" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -50,8 +51,8 @@ func newChartUpgradeAction(namespace string, chartUpgrades []types.ChartUpgrade) func newChartInstall(name, version, clusterID, clusterName, url, repoName, projectID, defaultRegistry string, chartValues map[string]interface{}) *types.ChartInstall { chartInstall := types.ChartInstall{ Annotations: map[string]string{ - "catalog.cattle.io/ui-source-repo": repoName, - "catalog.cattle.io/ui-source-repo-type": "cluster", + annotations.UiSourceRepo: repoName, + annotations.UiSourceRepoType: "cluster", }, ChartName: name, ReleaseName: name, @@ -83,8 +84,8 @@ func newChartInstall(name, version, clusterID, clusterName, url, repoName, proje func newChartUpgrade(name, version, clusterID, clusterName, url, defaultRegistry string, chartValues map[string]interface{}) *types.ChartUpgrade { chartUpgrade := types.ChartUpgrade{ Annotations: map[string]string{ - "catalog.cattle.io/ui-source-repo": "rancher-charts", - "catalog.cattle.io/ui-source-repo-type": "cluster", + annotations.UiSourceRepo: "rancher-charts", + annotations.UiSourceRepoType: "cluster", }, ChartName: name, ReleaseName: name, diff --git a/extensions/charts/rancheralerting.go b/extensions/charts/rancheralerting.go index 809527be..d5385f48 100644 --- a/extensions/charts/rancheralerting.go +++ b/extensions/charts/rancheralerting.go @@ -7,8 +7,8 @@ import ( catalogv1 "github.com/rancher/rancher/pkg/apis/catalog.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/clients/rancher/catalog" - "github.com/rancher/shepherd/extensions/defaults" - kubenamespaces 
"github.com/rancher/shepherd/extensions/kubeapi/namespaces" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/namespaces" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" "github.com/rancher/shepherd/pkg/wait" @@ -65,7 +65,7 @@ func InstallRancherAlertingChart(client *rancher.Client, installOptions *Install watchAppInterface, err := catalogClient.Apps(RancherAlertingNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherAlertingName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -115,11 +115,11 @@ func InstallRancherAlertingChart(client *rancher.Client, installOptions *Install if err != nil { return err } - adminNamespaceResource := adminDynamicClient.Resource(kubenamespaces.NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchNamespaceInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherAlertingNamespace, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -147,7 +147,7 @@ func InstallRancherAlertingChart(client *rancher.Client, installOptions *Install // wait for chart to be full deployed watchAppInterface, err := catalogClient.Apps(RancherAlertingNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherAlertingName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err diff --git a/extensions/charts/ranchergatekeeper.go b/extensions/charts/ranchergatekeeper.go index d27291fb..07bfb3f0 100644 --- 
a/extensions/charts/ranchergatekeeper.go +++ b/extensions/charts/ranchergatekeeper.go @@ -7,9 +7,9 @@ import ( catalogv1 "github.com/rancher/rancher/pkg/apis/catalog.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/clients/rancher/catalog" - "github.com/rancher/shepherd/extensions/defaults" - kubenamespaces "github.com/rancher/shepherd/extensions/kubeapi/namespaces" - "github.com/rancher/shepherd/extensions/namespaces" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" "github.com/rancher/shepherd/pkg/wait" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -66,7 +66,7 @@ func InstallRancherGatekeeperChart(client *rancher.Client, installOptions *Insta watchAppInterface, err := catalogClient.Apps(RancherGatekeeperNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherGatekeeperName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -94,7 +94,7 @@ func InstallRancherGatekeeperChart(client *rancher.Client, installOptions *Insta watchAppInterface, err = catalogClient.Apps(RancherGatekeeperNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherGatekeeperCRDName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -120,7 +120,7 @@ func InstallRancherGatekeeperChart(client *rancher.Client, installOptions *Insta return err } - namespaceClient := steveclient.SteveType(namespaces.NamespaceSteveType) + namespaceClient := steveclient.SteveType(stevetypes.Namespace) namespace, err := namespaceClient.ByID(RancherGatekeeperNamespace) if err != nil { @@ -140,11 +140,11 @@ 
func InstallRancherGatekeeperChart(client *rancher.Client, installOptions *Insta if err != nil { return err } - adminNamespaceResource := adminDynamicClient.Resource(kubenamespaces.NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchNamespaceInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherGatekeeperNamespace, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -166,7 +166,7 @@ func InstallRancherGatekeeperChart(client *rancher.Client, installOptions *Insta // wait for chart to be fully deployed watchAppInterface, err := catalogClient.Apps(RancherGatekeeperNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherGatekeeperName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -243,7 +243,7 @@ func UpgradeRancherGatekeeperChart(client *rancher.Client, installOptions *Insta // wait for chart to be in status pending upgrade watchAppInterface, err := adminCatalogClient.Apps(RancherGatekeeperNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherGatekeeperName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -265,7 +265,7 @@ func UpgradeRancherGatekeeperChart(client *rancher.Client, installOptions *Insta // wait for chart to be full deployed watchAppInterface, err = adminCatalogClient.Apps(RancherGatekeeperNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherGatekeeperName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != 
nil { return err diff --git a/extensions/charts/rancheristio.go b/extensions/charts/rancheristio.go index b1b8482b..bbe72fea 100644 --- a/extensions/charts/rancheristio.go +++ b/extensions/charts/rancheristio.go @@ -7,9 +7,9 @@ import ( catalogv1 "github.com/rancher/rancher/pkg/apis/catalog.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/clients/rancher/catalog" - "github.com/rancher/shepherd/extensions/defaults" - kubenamespaces "github.com/rancher/shepherd/extensions/kubeapi/namespaces" - "github.com/rancher/shepherd/extensions/namespaces" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" "github.com/rancher/shepherd/pkg/wait" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -62,7 +62,7 @@ func InstallRancherIstioChart(client *rancher.Client, installOptions *InstallOpt watchAppInterface, err := catalogClient.Apps(RancherIstioNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherIstioName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -85,7 +85,7 @@ func InstallRancherIstioChart(client *rancher.Client, installOptions *InstallOpt return err } - namespaceClient := steveclient.SteveType(namespaces.NamespaceSteveType) + namespaceClient := steveclient.SteveType(stevetypes.Namespace) namespace, err := namespaceClient.ByID(RancherIstioNamespace) if err != nil { @@ -105,11 +105,11 @@ func InstallRancherIstioChart(client *rancher.Client, installOptions *InstallOpt if err != nil { return err } - adminNamespaceResource := adminDynamicClient.Resource(kubenamespaces.NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := 
adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchNamespaceInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherIstioNamespace, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -131,7 +131,7 @@ func InstallRancherIstioChart(client *rancher.Client, installOptions *InstallOpt // wait for chart to be full deployed watchAppInterface, err := catalogClient.Apps(RancherIstioNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherIstioName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -229,7 +229,7 @@ func UpgradeRancherIstioChart(client *rancher.Client, installOptions *InstallOpt // wait for chart to be in status pending upgrade watchAppInterface, err := adminCatalogClient.Apps(RancherIstioNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherIstioName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -251,7 +251,7 @@ func UpgradeRancherIstioChart(client *rancher.Client, installOptions *InstallOpt // wait for chart to be full deployed watchAppInterface, err = adminCatalogClient.Apps(RancherIstioNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherIstioName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err diff --git a/extensions/charts/rancherlogging.go b/extensions/charts/rancherlogging.go index 9e927f71..09f2fffd 100644 --- a/extensions/charts/rancherlogging.go +++ b/extensions/charts/rancherlogging.go @@ -7,9 +7,9 @@ import ( catalogv1 
"github.com/rancher/rancher/pkg/apis/catalog.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/clients/rancher/catalog" - "github.com/rancher/shepherd/extensions/defaults" - kubenamespaces "github.com/rancher/shepherd/extensions/kubeapi/namespaces" - "github.com/rancher/shepherd/extensions/namespaces" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" "github.com/rancher/shepherd/pkg/wait" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -64,7 +64,7 @@ func InstallRancherLoggingChart(client *rancher.Client, installOptions *InstallO watchAppInterface, err := catalogClient.Apps(RancherLoggingNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherLoggingName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -89,7 +89,7 @@ func InstallRancherLoggingChart(client *rancher.Client, installOptions *InstallO watchAppInterface, err = catalogClient.Apps(RancherLoggingNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherLoggingCRDName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -115,7 +115,7 @@ func InstallRancherLoggingChart(client *rancher.Client, installOptions *InstallO return err } - namespaceClient := steveclient.SteveType(namespaces.NamespaceSteveType) + namespaceClient := steveclient.SteveType(stevetypes.Namespace) namespace, err := namespaceClient.ByID(RancherLoggingNamespace) if err != nil { @@ -135,11 +135,11 @@ func InstallRancherLoggingChart(client *rancher.Client, installOptions *InstallO if err != nil { return err } - adminNamespaceResource 
:= adminDynamicClient.Resource(kubenamespaces.NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchNamespaceInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherLoggingNamespace, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -161,7 +161,7 @@ func InstallRancherLoggingChart(client *rancher.Client, installOptions *InstallO // wait for chart to be full deployed watchAppInterface, err := catalogClient.Apps(RancherLoggingNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherLoggingName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err diff --git a/extensions/charts/ranchermonitoring.go b/extensions/charts/ranchermonitoring.go index ea1754bd..2b1265f4 100644 --- a/extensions/charts/ranchermonitoring.go +++ b/extensions/charts/ranchermonitoring.go @@ -10,9 +10,9 @@ import ( "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/shepherd/clients/rancher/catalog" "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/defaults" - kubenamespaces "github.com/rancher/shepherd/extensions/kubeapi/namespaces" - "github.com/rancher/shepherd/extensions/namespaces" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" "github.com/rancher/shepherd/pkg/wait" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -72,7 +72,7 @@ func InstallRancherMonitoringChart(client *rancher.Client, installOptions *Insta watchAppInterface, err := 
catalogClient.Apps(RancherMonitoringNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherMonitoringName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -97,7 +97,7 @@ func InstallRancherMonitoringChart(client *rancher.Client, installOptions *Insta watchAppInterface, err = catalogClient.Apps(RancherMonitoringNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherMonitoringCRDName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -123,7 +123,7 @@ func InstallRancherMonitoringChart(client *rancher.Client, installOptions *Insta return err } - namespaceClient := steveclient.SteveType(namespaces.NamespaceSteveType) + namespaceClient := steveclient.SteveType(stevetypes.Namespace) namespace, err := namespaceClient.ByID(RancherMonitoringNamespace) if err != nil { @@ -143,11 +143,11 @@ func InstallRancherMonitoringChart(client *rancher.Client, installOptions *Insta if err != nil { return err } - adminNamespaceResource := adminDynamicClient.Resource(kubenamespaces.NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchNamespaceInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherMonitoringNamespace, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -169,7 +169,7 @@ func InstallRancherMonitoringChart(client *rancher.Client, installOptions *Insta // wait for chart to be full deployed watchAppInterface, err := catalogClient.Apps(RancherMonitoringNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + 
RancherMonitoringName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -267,7 +267,7 @@ func UpgradeRancherMonitoringChart(client *rancher.Client, installOptions *Insta // wait for chart to be in status pending upgrade watchAppInterface, err := adminCatalogClient.Apps(RancherMonitoringNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherMonitoringName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -289,7 +289,7 @@ func UpgradeRancherMonitoringChart(client *rancher.Client, installOptions *Insta // wait for chart to be full deployed watchAppInterface, err = adminCatalogClient.Apps(RancherMonitoringNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + RancherMonitoringName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err diff --git a/extensions/charts/verify.go b/extensions/charts/verify.go index 7c51745a..949d07ef 100644 --- a/extensions/charts/verify.go +++ b/extensions/charts/verify.go @@ -6,7 +6,7 @@ import ( v1 "github.com/rancher/rancher/pkg/apis/catalog.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher/catalog" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/wait" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" @@ -16,7 +16,7 @@ import ( func VerifyChartInstall(client *catalog.Client, chartNamespace, chartName string) error { watchAppInterface, err := client.Apps(chartNamespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + chartName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: 
timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err diff --git a/extensions/charts/vsphereoutoftree.go b/extensions/charts/vsphereoutoftree.go index 76ecc3eb..93247168 100644 --- a/extensions/charts/vsphereoutoftree.go +++ b/extensions/charts/vsphereoutoftree.go @@ -2,6 +2,7 @@ package charts import ( "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/namespaces" "github.com/rancher/shepherd/extensions/projects" "github.com/rancher/shepherd/extensions/rke1/nodetemplates" "github.com/rancher/shepherd/pkg/api/steve/catalog/types" @@ -69,13 +70,13 @@ func InstallVsphereOutOfTreeCharts(client *rancher.Client, vsphereTemplate *node chartInstallActionPayload := &payloadOpts{ InstallOptions: *installCPIOptions, Name: vsphereCPIchartName, - Namespace: kubeSystemNamespace, + Namespace: namespaces.KubeSystem, Host: serverSetting.Value, DefaultRegistry: registrySetting.Value, } chartInstallAction, err := vsphereCPIChartInstallAction(catalogClient, - chartInstallActionPayload, vsphereTemplate, installCPIOptions, repoName, kubeSystemNamespace) + chartInstallActionPayload, vsphereTemplate, installCPIOptions, repoName, namespaces.KubeSystem) if err != nil { return err } @@ -85,7 +86,7 @@ func InstallVsphereOutOfTreeCharts(client *rancher.Client, vsphereTemplate *node return err } - err = VerifyChartInstall(catalogClient, kubeSystemNamespace, vsphereCPIchartName) + err = VerifyChartInstall(catalogClient, namespaces.KubeSystem, vsphereCPIchartName) if err != nil { return err } @@ -104,13 +105,13 @@ func InstallVsphereOutOfTreeCharts(client *rancher.Client, vsphereTemplate *node chartInstallActionPayload = &payloadOpts{ InstallOptions: *installCSIOptions, Name: vsphereCSIchartName, - Namespace: kubeSystemNamespace, + Namespace: namespaces.KubeSystem, Host: serverSetting.Value, DefaultRegistry: registrySetting.Value, } chartInstallAction, err = vsphereCSIChartInstallAction(catalogClient, 
chartInstallActionPayload, - vsphereTemplate, installCSIOptions, repoName, kubeSystemNamespace) + vsphereTemplate, installCSIOptions, repoName, namespaces.KubeSystem) if err != nil { return err } diff --git a/extensions/clusters/aks/nodepools.go b/extensions/clusters/aks/nodepools.go index 0ee4b861..f0b0e99f 100644 --- a/extensions/clusters/aks/nodepools.go +++ b/extensions/clusters/aks/nodepools.go @@ -5,14 +5,11 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/defaults/states" "github.com/sirupsen/logrus" kwait "k8s.io/apimachinery/pkg/util/wait" ) -const ( - active = "active" -) - // updateNodePoolQuantity is a helper method that will update the node pool with the desired quantity. func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, nodePool *NodePool) (*management.Cluster, error) { clusterResp, err := client.Management.Cluster.ByID(cluster.ID) @@ -46,7 +43,7 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, return false, err } - if clusterResp.State == active && clusterResp.NodeCount == *aksConfig.NodePools[0].Count { + if clusterResp.State == states.Active && clusterResp.NodeCount == *aksConfig.NodePools[0].Count { return true, nil } diff --git a/extensions/clusters/bundledclusters/get.go b/extensions/clusters/bundledclusters/get.go index 59957e1e..9409ad4f 100644 --- a/extensions/clusters/bundledclusters/get.go +++ b/extensions/clusters/bundledclusters/get.go @@ -2,7 +2,7 @@ package bundledclusters import ( "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" ) // Get is a method of BundledCluster that uses provisioning and management clients @@ -11,7 +11,7 @@ func (bc *BundledCluster) Get(client *rancher.Client) (cluster *BundledCluster, cluster = 
new(BundledCluster) cluster.Meta = bc.Meta - steveclient := client.Steve.SteveType(clusters.ProvisioningSteveResourceType) + steveclient := client.Steve.SteveType(stevetypes.Provisioning) if err != nil { return } diff --git a/extensions/clusters/bundledclusters/update.go b/extensions/clusters/bundledclusters/update.go index ede91484..8b9f0d57 100644 --- a/extensions/clusters/bundledclusters/update.go +++ b/extensions/clusters/bundledclusters/update.go @@ -9,6 +9,7 @@ import ( v1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/clients/rkecli" "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" ) // Update is a method of BundledCluster that uses provisioning and management clients @@ -17,7 +18,7 @@ func (bc *BundledCluster) Update(client *rancher.Client, cUpdates *BundledCluste updatedCluster = new(BundledCluster) updatedCluster.Meta = bc.Meta - steveclient := client.Steve.SteveType(clusters.ProvisioningSteveResourceType) + steveclient := client.Steve.SteveType(stevetypes.Provisioning) if err != nil { return } diff --git a/extensions/clusters/clusters.go b/extensions/clusters/clusters.go index 14d33b3f..80ae6d8b 100644 --- a/extensions/clusters/clusters.go +++ b/extensions/clusters/clusters.go @@ -14,9 +14,11 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/provisioninginput" - "github.com/rancher/shepherd/extensions/workloads/pods" "github.com/rancher/shepherd/pkg/api/scheme" 
"github.com/rancher/shepherd/pkg/wait" "github.com/rancher/wrangler/pkg/summary" @@ -30,12 +32,8 @@ import ( ) const ( - active = "active" - baseline = "baseline" - externalAws = "external-aws" - FleetSteveResourceType = "fleet.cattle.io.cluster" - PodSecurityAdmissionSteveResoureType = "management.cattle.io.podsecurityadmissionconfigurationtemplate" - ProvisioningSteveResourceType = "provisioning.cattle.io.cluster" + baseline = "baseline" + externalAws = "external-aws" etcdRole = "etcd-role" controlPlaneRole = "control-plane-role" @@ -45,20 +43,17 @@ const ( kubeletArgKey = "kubelet-arg" kubeletAPIServerArgKey = "kubeapi-server-arg" kubeControllerManagerArgKey = "kube-controller-manager-arg" - cloudProviderAnnotationName = "cloud-provider-name" disableCloudController = "disable-cloud-controller" protectKernelDefaults = "protect-kernel-defaults" localcluster = "fleet-local/local" ErrMsgListDownstreamClusters = "Couldn't list downstream clusters" - clusterStateUpgrading = "upgrading" // For imported RKE2 and K3s clusters - clusterStateUpdating = "updating" // For all clusters except imported K3s and RKE2 clusterErrorStateMessage = "cluster is in error state" ) // GetV1ProvisioningClusterByName is a helper function that returns the cluster ID by name func GetV1ProvisioningClusterByName(client *rancher.Client, clusterName string) (string, error) { - clusterList, err := client.Steve.SteveType(ProvisioningSteveResourceType).List(nil) + clusterList, err := client.Steve.SteveType(stevetypes.Provisioning).List(nil) if err != nil { return "", err } @@ -162,7 +157,7 @@ func CheckServiceAccountTokenSecret(client *rancher.Client, clusterName string) // CreateRancherBaselinePSACT creates custom PSACT called rancher-baseline which sets each PSS to baseline. 
func CreateRancherBaselinePSACT(client *rancher.Client, psact string) error { - _, err := client.Steve.SteveType(PodSecurityAdmissionSteveResoureType).ByID(psact) + _, err := client.Steve.SteveType(stevetypes.PodSecurityAdmission).ByID(psact) if err == nil { return err } @@ -209,7 +204,7 @@ func CreateRancherBaselinePSACT(client *rancher.Client, psact string) error { }, } - _, err = client.Steve.SteveType(PodSecurityAdmissionSteveResoureType).Create(template) + _, err = client.Steve.SteveType(stevetypes.PodSecurityAdmission).Create(template) if err != nil { return err } @@ -429,8 +424,8 @@ func NewK3SRKE2ClusterConfig(clusterName, namespace string, clustersConfig *Clus if clustersConfig.CloudProvider == provisioninginput.VsphereCloudProviderName.String() { machineSelectorConfigs = append(machineSelectorConfigs, RKESystemConfigTemplate(map[string]interface{}{ - cloudProviderAnnotationName: provisioninginput.VsphereCloudProviderName.String(), - protectKernelDefaults: false, + annotations.CloudProviderName: provisioninginput.VsphereCloudProviderName.String(), + protectKernelDefaults: false, }, nil), ) @@ -500,8 +495,8 @@ func OutOfTreeSystemConfig(providerName string) (rkeConfig []rkev1.RKESystemConf } configData := map[string]interface{}{ - cloudProviderAnnotationName: providerName, - protectKernelDefaults: false, + annotations.CloudProviderName: providerName, + protectKernelDefaults: false, } rkeConfig = append(rkeConfig, RKESystemConfigTemplate(configData, nil)) @@ -515,8 +510,8 @@ func OutOfTreeSystemConfig(providerName string) (rkeConfig []rkev1.RKESystemConf // Azure deprecated 1.28+ func InTreeSystemConfig(providerName string) (rkeConfig []rkev1.RKESystemConfig) { configData := map[string]interface{}{ - cloudProviderAnnotationName: providerName, - protectKernelDefaults: false, + annotations.CloudProviderName: providerName, + protectKernelDefaults: false, } rkeConfig = append(rkeConfig, RKESystemConfigTemplate(configData, nil)) return @@ -947,7 +942,7 @@ func 
CreateRKE1Cluster(client *rancher.Client, rke1Cluster *management.Cluster) watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ FieldSelector: "metadata.name=" + clusterResp.ID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -969,7 +964,7 @@ func CreateRKE1Cluster(client *rancher.Client, rke1Cluster *management.Cluster) // CreateK3SRKE2Cluster is a "helper" functions that takes a rancher client, and the rke2 cluster config as parameters. This function // registers a delete cluster fuction with a wait.WatchWait to ensure the cluster is removed cleanly. func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) (*v1.SteveAPIObject, error) { - cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).Create(rke2Cluster) + cluster, err := client.Steve.SteveType(stevetypes.Provisioning).Create(rke2Cluster) if err != nil { return nil, err } @@ -980,7 +975,7 @@ func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) ( return false, err } - _, err = client.Steve.SteveType(ProvisioningSteveResourceType).ByID(cluster.ID) + _, err = client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.ID) if err != nil { return false, nil } @@ -1005,7 +1000,7 @@ func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) ( watchInterface, err := provKubeClient.Clusters(cluster.ObjectMeta.Namespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + cluster.ObjectMeta.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { @@ -1017,7 +1012,7 @@ func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) ( return err } - err = client.Steve.SteveType(ProvisioningSteveResourceType).Delete(cluster) + err = 
client.Steve.SteveType(stevetypes.Provisioning).Delete(cluster) if err != nil { return err } @@ -1058,13 +1053,13 @@ func DeleteRKE1Cluster(client *rancher.Client, clusterID string) error { // DeleteK3SRKE2Cluster is a "helper" functions that takes a rancher client, and the non-rke1 cluster ID as parameters to delete // the cluster. func DeleteK3SRKE2Cluster(client *rancher.Client, clusterID string) error { - cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) + cluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(clusterID) if err != nil { return err } logrus.Infof("Deleting cluster %s...", cluster.Name) - err = client.Steve.SteveType(ProvisioningSteveResourceType).Delete(cluster) + err = client.Steve.SteveType(stevetypes.Provisioning).Delete(cluster) if err != nil { return err } @@ -1074,14 +1069,14 @@ func DeleteK3SRKE2Cluster(client *rancher.Client, clusterID string) error { // UpdateK3SRKE2Cluster is a "helper" functions that takes a rancher client, old rke2/k3s cluster config, and the new rke2/k3s cluster config as parameters. 
func UpdateK3SRKE2Cluster(client *rancher.Client, cluster *v1.SteveAPIObject, updatedCluster *apisV1.Cluster) (*v1.SteveAPIObject, error) { - updateCluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(cluster.ID) + updateCluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.ID) if err != nil { return nil, err } updatedCluster.ObjectMeta.ResourceVersion = updateCluster.ObjectMeta.ResourceVersion - cluster, err = client.Steve.SteveType(ProvisioningSteveResourceType).Update(cluster, updatedCluster) + cluster, err = client.Steve.SteveType(stevetypes.Provisioning).Update(cluster, updatedCluster) if err != nil { return nil, err } @@ -1092,7 +1087,7 @@ func UpdateK3SRKE2Cluster(client *rancher.Client, cluster *v1.SteveAPIObject, up return false, err } - clusterResp, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(cluster.ID) + clusterResp, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.ID) if err != nil { return false, err } @@ -1103,13 +1098,13 @@ func UpdateK3SRKE2Cluster(client *rancher.Client, cluster *v1.SteveAPIObject, up return false, err } - if clusterResp.ObjectMeta.State.Name == active { + if clusterResp.ObjectMeta.State.Name == states.Active { proxyClient, err := client.Steve.ProxyDownstream(clusterStatus.ClusterName) if err != nil { return false, err } - _, err = proxyClient.SteveType(pods.PodResourceSteveType).List(nil) + _, err = proxyClient.SteveType(stevetypes.Pod).List(nil) if err != nil { return false, nil } @@ -1134,7 +1129,7 @@ func WaitClusterToBeInUpgrade(client *rancher.Client, clusterID string) (err err var clusterInfo string opts := metav1.ListOptions{ FieldSelector: "metadata.name=" + clusterID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), } watchInterface, err := client.GetManagementWatchInterface(management.ClusterType, opts) @@ -1147,7 +1142,7 @@ func WaitClusterToBeInUpgrade(client 
*rancher.Client, clusterID string) (err err clusterInfo = logClusterInfoWithChanges(clusterID, clusterInfo, summarizedCluster) - if summarizedCluster.Transitioning && !summarizedCluster.Error && (summarizedCluster.State == clusterStateUpdating || summarizedCluster.State == clusterStateUpgrading) { + if summarizedCluster.Transitioning && !summarizedCluster.Error && (summarizedCluster.State == states.Updating || summarizedCluster.State == states.Upgrading) { return true, nil } else if summarizedCluster.Error && isClusterInaccessible(summarizedCluster.Message) { return false, nil @@ -1173,7 +1168,7 @@ func WaitClusterUntilUpgrade(client *rancher.Client, clusterID string) (err erro var clusterInfo string opts := metav1.ListOptions{ FieldSelector: "metadata.name=" + clusterID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), } watchInterfaceWaitUpgrade, err := client.GetManagementWatchInterface(management.ClusterType, opts) @@ -1252,13 +1247,13 @@ func logClusterInfoWithChanges(clusterID, clusterInfo string, summary summary.Su // WatchAndWaitForCluster is function that waits for a cluster to go unactive before checking its active state. 
func WatchAndWaitForCluster(client *rancher.Client, steveID string) error { var clusterResp *v1.SteveAPIObject - err := kwait.PollUntilContextTimeout(context.TODO(), 1*time.Second, defaults.TwoMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - clusterResp, err = client.Steve.SteveType(ProvisioningSteveResourceType).ByID(steveID) + err := kwait.PollUntilContextTimeout(context.TODO(), 1*time.Second, timeouts.TwoMinute, true, func(ctx context.Context) (done bool, err error) { + clusterResp, err = client.Steve.SteveType(stevetypes.Provisioning).ByID(steveID) if err != nil { return false, err } state := clusterResp.ObjectMeta.State.Name - return state != "active", nil + return state != states.Active, nil }) if err != nil { return err @@ -1276,7 +1271,7 @@ func WatchAndWaitForCluster(client *rancher.Client, steveID string) error { result, err := kubeProvisioningClient.Clusters(clusterResp.ObjectMeta.Namespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + clusterResp.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return err @@ -1288,7 +1283,7 @@ func WatchAndWaitForCluster(client *rancher.Client, steveID string) error { // GetProvisioningClusterByName is a helper function to get cluster object with the cluster name func GetProvisioningClusterByName(client *rancher.Client, clusterName string, namespace string) (*apisV1.Cluster, *v1.SteveAPIObject, error) { - clusterObj, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(namespace + "/" + clusterName) + clusterObj, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(namespace + "/" + clusterName) if err != nil { return nil, nil, err } @@ -1314,7 +1309,7 @@ func WaitForActiveRKE1Cluster(client *rancher.Client, clusterID string) error { if err != nil { return false, err } - if clusterResp.State == active { + if clusterResp.State == states.Active { return true, 
nil } return false, nil @@ -1327,7 +1322,7 @@ func WaitForActiveRKE1Cluster(client *rancher.Client, clusterID string) error { // ListDownstreamClusters is a helper function to get the name of the downstream clusters func ListDownstreamClusters(client *rancher.Client) (clusterNames []string, err error) { - clusterList, err := client.Steve.SteveType(ProvisioningSteveResourceType).ListAll(nil) + clusterList, err := client.Steve.SteveType(stevetypes.Provisioning).ListAll(nil) if err != nil { return nil, errors.Wrap(err, ErrMsgListDownstreamClusters) } diff --git a/extensions/clusters/eks/nodepools.go b/extensions/clusters/eks/nodepools.go index 6ce83d5a..e4232e63 100644 --- a/extensions/clusters/eks/nodepools.go +++ b/extensions/clusters/eks/nodepools.go @@ -5,14 +5,11 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/defaults/states" "github.com/sirupsen/logrus" kwait "k8s.io/apimachinery/pkg/util/wait" ) -const ( - active = "active" -) - // updateNodePoolQuantity is a helper method that will update the node pool with the desired quantity. 
func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, nodePool *NodeGroupConfig) (*management.Cluster, error) { clusterResp, err := client.Management.Cluster.ByID(cluster.ID) @@ -46,7 +43,7 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, return false, err } - if clusterResp.State == active && clusterResp.NodeCount == *eksConfig.NodeGroups[0].DesiredSize { + if clusterResp.State == states.Active && clusterResp.NodeCount == *eksConfig.NodeGroups[0].DesiredSize { return true, nil } diff --git a/extensions/clusters/gke/nodepools.go b/extensions/clusters/gke/nodepools.go index 347832f8..fbd74944 100644 --- a/extensions/clusters/gke/nodepools.go +++ b/extensions/clusters/gke/nodepools.go @@ -5,14 +5,11 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/defaults/states" "github.com/sirupsen/logrus" kwait "k8s.io/apimachinery/pkg/util/wait" ) -const ( - active = "active" -) - // updateNodePoolQuantity is a helper method that will update the node pool with the desired quantity. 
func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, nodePool *NodePool) (*management.Cluster, error) { clusterResp, err := client.Management.Cluster.ByID(cluster.ID) @@ -46,7 +43,7 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, return false, err } - if clusterResp.State == active && clusterResp.NodeCount == *gkeConfig.NodePools[0].InitialNodeCount { + if clusterResp.State == states.Active && clusterResp.NodeCount == *gkeConfig.NodePools[0].InitialNodeCount { return true, nil } diff --git a/extensions/clusters/import.go b/extensions/clusters/import.go index 1663cbfe..01fd1ca0 100644 --- a/extensions/clusters/import.go +++ b/extensions/clusters/import.go @@ -10,6 +10,7 @@ import ( "github.com/rancher/shepherd/clients/dynamic" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/defaults/namespaces" ext_unstructured "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/wait" batchv1 "k8s.io/api/batch/v1" @@ -52,10 +53,6 @@ users: ` ) -var ( - importTimeout = int64(60 * 20) -) - // ImportCluster creates a job using the given rest config that applies the import yaml from the given management cluster. 
func ImportCluster(client *rancher.Client, cluster *apisV1.Cluster, rest *rest.Config) error { // create a sub session to clean up after we apply the manifest @@ -109,7 +106,7 @@ func ImportCluster(client *rancher.Client, cluster *apisV1.Cluster, rest *rest.C return true, nil }) - _, err = downClient.Resource(corev1.SchemeGroupVersion.WithResource("serviceaccounts")).Namespace("kube-system").Create(context.TODO(), ext_unstructured.MustToUnstructured(sa), metav1.CreateOptions{}) + _, err = downClient.Resource(corev1.SchemeGroupVersion.WithResource("serviceaccounts")).Namespace(namespaces.KubeSystem).Create(context.TODO(), ext_unstructured.MustToUnstructured(sa), metav1.CreateOptions{}) if err != nil { return err } @@ -122,7 +119,7 @@ func ImportCluster(client *rancher.Client, cluster *apisV1.Cluster, rest *rest.C { Kind: "ServiceAccount", Name: sa.Name, - Namespace: "kube-system", + Namespace: namespaces.KubeSystem, }, }, RoleRef: rbacv1.RoleRef{ @@ -144,7 +141,7 @@ func ImportCluster(client *rancher.Client, cluster *apisV1.Cluster, rest *rest.C "config": kubeConfig, }, } - _, err = downClient.Resource(corev1.SchemeGroupVersion.WithResource("configmaps")).Namespace("kube-system").Create(context.TODO(), ext_unstructured.MustToUnstructured(cm), metav1.CreateOptions{}) + _, err = downClient.Resource(corev1.SchemeGroupVersion.WithResource("configmaps")).Namespace(namespaces.KubeSystem).Create(context.TODO(), ext_unstructured.MustToUnstructured(cm), metav1.CreateOptions{}) if err != nil { return err } @@ -199,12 +196,13 @@ func ImportCluster(client *rancher.Client, cluster *apisV1.Cluster, rest *rest.C }, }, } - _, err = downClient.Resource(batchv1.SchemeGroupVersion.WithResource("jobs")).Namespace("kube-system").Create(context.TODO(), ext_unstructured.MustToUnstructured(job), metav1.CreateOptions{}) + _, err = downClient.Resource(batchv1.SchemeGroupVersion.WithResource("jobs")).Namespace(namespaces.KubeSystem).Create(context.TODO(), 
ext_unstructured.MustToUnstructured(job), metav1.CreateOptions{}) if err != nil { return err } - jobWatch, err := downClient.Resource(batchv1.SchemeGroupVersion.WithResource("jobs")).Namespace("kube-system").Watch(context.TODO(), metav1.ListOptions{ + importTimeout := int64(60 * 20) + jobWatch, err := downClient.Resource(batchv1.SchemeGroupVersion.WithResource("jobs")).Namespace(namespaces.KubeSystem).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", job.Name).String(), TimeoutSeconds: &importTimeout, }) diff --git a/extensions/codecoverage/codecoverage.go b/extensions/codecoverage/codecoverage.go index 084d6019..db0b6dd6 100644 --- a/extensions/codecoverage/codecoverage.go +++ b/extensions/codecoverage/codecoverage.go @@ -9,30 +9,28 @@ import ( apiv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/kubeconfig" "github.com/rancher/shepherd/pkg/killserver" "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" ) -var podGroupVersionResource = corev1.SchemeGroupVersion.WithResource("pods") - const ( - cattleSystemNameSpace = "cattle-system" - localCluster = "local" - rancherCoverFile = "ranchercoverage" - agentCoverFile = "agentcoverage" - outputDir = "cover" + localCluster = "local" + rancherCoverFile = "ranchercoverage" + agentCoverFile = "agentcoverage" + outputDir = "cover" ) func checkServiceIsRunning(dynamicClient dynamic.Interface) error { 
return kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) { - _, err = dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) + _, err = dynamicClient.Resource(groupversionresources.Pod()).Namespace(namespaces.CattleSystem).List(context.Background(), metav1.ListOptions{}) if k8sErrors.IsInternalError(err) || k8sErrors.IsServiceUnavailable(err) { return false, nil } else if err != nil { @@ -60,7 +58,7 @@ func killTestServices(client *rancher.Client, clusterID string, podNames []strin } for _, podName := range podNames { - _, err := kubeconfig.KubectlExec(restConfig, podName, cattleSystemNameSpace, cmd) + _, err := kubeconfig.KubectlExec(restConfig, podName, namespaces.CattleSystem, cmd) if err != nil { logrus.Errorf("error killing pod container %v", err) } @@ -84,7 +82,7 @@ func retrieveCodeCoverageFile(client *rancher.Client, clusterID, coverageFilenam fileName := fmt.Sprintf("%s%s", podName, coverageFilename) dst := fmt.Sprintf("%s/%s", outputDir, fileName) - err := kubeconfig.CopyFileFromPod(restConfig, *kubeConfig, podName, cattleSystemNameSpace, coverageFilename, dst) + err := kubeconfig.CopyFileFromPod(restConfig, *kubeConfig, podName, namespaces.CattleSystem, coverageFilename, dst) if err != nil { return err } @@ -103,7 +101,7 @@ func KillRancherTestServicesRetrieveCoverage(client *rancher.Client) error { return err } - pods, err := dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) + pods, err := dynamicClient.Resource(groupversionresources.Pod()).Namespace(namespaces.CattleSystem).List(context.Background(), metav1.ListOptions{}) if err != nil { return err } @@ -132,7 +130,7 @@ func KillRancherTestServicesRetrieveCoverage(client *rancher.Client) error { // inorder for the code coverage report to be written, and then copies over the coverage reports from the pods // to a local 
destination. The custom code coverage rancher-agent image must be running in the downstream cluster. func KillAgentTestServicesRetrieveCoverage(client *rancher.Client) error { - clusters, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ListAll(nil) + clusters, err := client.Steve.SteveType(stevetypes.Provisioning).ListAll(nil) if err != nil { return err } @@ -151,7 +149,7 @@ func KillAgentTestServicesRetrieveCoverage(client *rancher.Client) error { continue } - pods, err := dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) + pods, err := dynamicClient.Resource(groupversionresources.Pod()).Namespace(namespaces.CattleSystem).List(context.Background(), metav1.ListOptions{}) if err != nil { logrus.Errorf("could not list pods") continue diff --git a/extensions/defaults/annotations/annotations.go b/extensions/defaults/annotations/annotations.go new file mode 100644 index 00000000..87afec66 --- /dev/null +++ b/extensions/defaults/annotations/annotations.go @@ -0,0 +1,14 @@ +package annotations + +const ( + Machine = "cluster.x-k8s.io/machine" + ExternalIp = "rke.cattle.io/external-ip" + InternalIp = "alpha.kubernetes.io/provided-node-ip" + ControlPlaneLeader = "control-plane.alpha.kubernetes.io/leader" + CloudProviderName = "cloud-provider-name" + UiSourceRepo = "catalog.cattle.io/ui-source-repo" + UiSourceRepoType = "catalog.cattle.io/ui-source-repo-type" + ContainerResourceLimit = "field.cattle.io/containerDefaultResourceLimit" + ProjectId = "field.cattle.io/projectId" + Description = "field.cattle.io/description" +) diff --git a/extensions/defaults/defaults.go b/extensions/defaults/defaults.go index 91737a63..92453191 100644 --- a/extensions/defaults/defaults.go +++ b/extensions/defaults/defaults.go @@ -1,16 +1,45 @@ package defaults -import "time" - -var ( - WatchTimeoutSeconds = int64(60 * 30) // 30 minutes. 
- FiveHundredMillisecondTimeout = 500 * time.Millisecond - FiveSecondTimeout = 5 * time.Second - TenSecondTimeout = 10 * time.Second - OneMinuteTimeout = 1 * time.Minute - TwoMinuteTimeout = 2 * time.Minute - FiveMinuteTimeout = 5 * time.Minute - TenMinuteTimeout = 10 * time.Minute - FifteenMinuteTimeout = 15 * time.Minute - ThirtyMinuteTimeout = 30 * time.Minute +import ( + "errors" + "os" + + "sigs.k8s.io/yaml" ) + +// LoadDefault is a helper to load objects that are stored in a yaml files into their go equivalent struct objects +func LoadDefault(defaultFile string, defaultName string, defaultObject interface{}) error { + if defaultFile == "" { + yaml.Unmarshal([]byte("{}"), defaultFile) + err := errors.New("No default file found") + return err + } + + allString, err := os.ReadFile(defaultFile) + if err != nil { + panic(err) + } + + var all map[string]map[string]interface{} + err = yaml.Unmarshal(allString, &all) + if err != nil { + panic(err) + } + + var keys []string + for key := range all[defaultName] { + keys = append(keys, key) + } + scoped := all[defaultName][keys[0]] + scopedString, err := yaml.Marshal(scoped) + if err != nil { + panic(err) + } + + err = yaml.Unmarshal(scopedString, &defaultObject) + if err != nil { + panic(err) + } + + return nil +} diff --git a/extensions/defaults/labels/labels.go b/extensions/defaults/labels/labels.go new file mode 100644 index 00000000..b19b0c85 --- /dev/null +++ b/extensions/defaults/labels/labels.go @@ -0,0 +1,11 @@ +package labels + +const ( + EtcdRole = "node-role.kubernetes.io/etcd" + ControlplaneRole = "node-role.kubernetes.io/control-plane" + WorkerRole = "node-role.kubernetes.io/worker" + WorkloadSelector = "workload.user.cattle.io/workloadselector" + RtbOwnerUpdated = "authz.cluster.cattle.io/rtb-owner-updated" + InitNode = "rke.cattle.io/init-node" + MachineName = "rke.cattle.io/machine-name" +) diff --git a/extensions/defaults/namespaces/namespaces.go b/extensions/defaults/namespaces/namespaces.go new file 
mode 100644 index 00000000..508f53eb --- /dev/null +++ b/extensions/defaults/namespaces/namespaces.go @@ -0,0 +1,8 @@ +package namespaces + +const ( + KubeSystem = "kube-system" + CattleSystem = "cattle-system" + Fleet = "fleet-default" + Default = "default" +) diff --git a/extensions/defaults/schema/groupversionresources/groupversionresources.go b/extensions/defaults/schema/groupversionresources/groupversionresources.go new file mode 100644 index 00000000..5246a60a --- /dev/null +++ b/extensions/defaults/schema/groupversionresources/groupversionresources.go @@ -0,0 +1,222 @@ +package groupversionresources + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func Node() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "nodes", + } +} + +func Pod() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "pods", + } +} + +func ConfigMap() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "configmaps", + } +} + +func CustomResourceDefinition() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + } +} + +func Ingress() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "ingresses", + } +} + +func Project() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "projects", + } +} + +func Role() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Resource: "roles", + } +} + +func ClusterRole() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: rbacv1.SchemeGroupVersion.Group, + 
Version: rbacv1.SchemeGroupVersion.Version, + Resource: "clusterroles", + } +} + +func RoleBinding() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Resource: "rolebindings", + } +} + +func ClusterRoleBinding() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Resource: "clusterrolebindings", + } +} + +func GlobalRole() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "globalroles", + } +} + +func GlobalRoleBinding() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "globalrolebindings", + } +} + +func ClusterRoleTemplateBinding() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "clusterroletemplatebindings", + } +} + +func RoleTemplate() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "roletemplates", + } +} + +func ProjectRoleTemplateBinding() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "projectroletemplatebindings", + } +} + +func ResourceQuota() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "resourcequotas", + } +} + +func Secret() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "secrets", + } +} + +func Service() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "services", + } +} + +func StorageClass() schema.GroupVersionResource { + return 
schema.GroupVersionResource{ + Group: "storage.k8s.io", + Version: "v1", + Resource: "storageclasses", + } +} + +func Token() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "management.cattle.io", + Version: "v3", + Resource: "tokens", + } +} + +func PersistentVolumeClaim() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "persistentvolumeclaims", + } +} + +func PersistentVolume() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "persistentvolumes", + } +} + +func Namespace() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "", + Version: "v1", + Resource: "namespaces", + } +} + +func Daemonset() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "daemonsets", + } +} + +func Deployment() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + } +} + +func Job() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "batch", + Version: "v1", + Resource: "jobs", + } +} + +func CronJob() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "batch", + Version: "v1beta1", + Resource: "cronjobs", + } +} diff --git a/extensions/defaults/states/states.go b/extensions/defaults/states/states.go new file mode 100644 index 00000000..4400eef1 --- /dev/null +++ b/extensions/defaults/states/states.go @@ -0,0 +1,10 @@ +package states + +const ( + Active = "active" + Error = "error" + Running = "running" + Upgrading = "upgrading" + Updating = "updating" + Waiting = "waiting" +) diff --git a/extensions/defaults/stevetypes/stevetypes.go b/extensions/defaults/stevetypes/stevetypes.go index cb2b6bb2..71274000 100644 --- a/extensions/defaults/stevetypes/stevetypes.go +++ b/extensions/defaults/stevetypes/stevetypes.go @@ -1,7 
+1,25 @@ package stevetypes const ( - Provisioning = "provisioning.cattle.io.cluster" - EtcdSnapshot = "rke.cattle.io.etcdsnapshot" - FleetCluster = "fleet.cattle.io.cluster" + Provisioning = "provisioning.cattle.io.cluster" + EtcdSnapshot = "rke.cattle.io.etcdsnapshot" + FleetCluster = "fleet.cattle.io.cluster" + ClusterRoleBinding = "rbac.authorization.k8s.io.clusterrolebinding" + PodSecurityAdmission = "management.cattle.io.podsecurityadmissionconfigurationtemplate" + GlobalRoleBinding = "management.cattle.io.globalrolebinding" + ManagementSetting = "management.cattle.io.setting" + CoordinationLease = "coordination.k8s.io.lease" + ClusterRepo = "catalog.cattle.io.clusterrepo" + Apps = "catalog.cattle.io.apps" + Machine = "cluster.x-k8s.io.machine" + Ingress = "networking.k8s.io.ingress" + Deployment = "apps.deployment" + Daemonset = "apps.daemonset" + Service = "service" + ServiceAccount = "serviceaccount" + Node = "node" + Pod = "pod" + Namespace = "namespace" + Configmap = "configmap" + ResourceQuota = "resourcequota" ) diff --git a/extensions/defaults/timeouts/timeouts.go b/extensions/defaults/timeouts/timeouts.go new file mode 100644 index 00000000..9dbe26b0 --- /dev/null +++ b/extensions/defaults/timeouts/timeouts.go @@ -0,0 +1,22 @@ +package timeouts + +import "time" + +func WatchTimeout(timeout time.Duration) *int64 { + timeoutSeconds := int64(timeout / time.Second) // callers pass time.Duration (e.g. ThirtyMinute); 60*int(d) counted nanoseconds and was off by ~1e11 + return &timeoutSeconds +} + +const ( + FiveHundredMillisecond = 500 * time.Millisecond + FiveSecond = 5 * time.Second + TenSecond = 10 * time.Second + OneMinute = 1 * time.Minute + TwoMinute = 2 * time.Minute + ThreeMinute = 3 * time.Minute + FiveMinute = 5 * time.Minute + TenMinute = 10 * time.Minute + FifteenMinute = 15 * time.Minute + TwentyMinute = 20 * time.Minute + ThirtyMinute = 30 * time.Minute +) diff --git a/extensions/etcdsnapshot/etcdsnapshot.go b/extensions/etcdsnapshot/etcdsnapshot.go index 18a6a2ed..83037b00 100644 --- a/extensions/etcdsnapshot/etcdsnapshot.go +++
b/extensions/etcdsnapshot/etcdsnapshot.go @@ -14,8 +14,10 @@ import ( management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" rancherv1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/states" "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/kubeapi/nodes" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,10 +25,7 @@ import ( ) const ( - ProvisioningSteveResouceType = "provisioning.cattle.io.cluster" - fleetNamespace = "fleet-default" - localClusterName = "local" - active = "active" + localClusterName = "local" ) func MatchNodeToAnyEtcdRole(client *rancher.Client, clusterID string) (int, *management.Node) { @@ -118,7 +117,7 @@ func CreateRKE1Snapshot(client *rancher.Client, clusterName string) error { return err } - err = wait.Poll(1*time.Second, defaults.FiveMinuteTimeout, func() (bool, error) { + err = wait.Poll(1*time.Second, timeouts.FiveMinute, func() (bool, error) { snapshotSteveObjList, err := client.Management.EtcdBackup.ListAll(&types.ListOpts{ Filters: map[string]interface{}{ "clusterId": clusterID, @@ -134,7 +133,7 @@ func CreateRKE1Snapshot(client *rancher.Client, clusterName string) error { return false, nil } - if snapshotObj.State != active { + if snapshotObj.State != states.Active { return false, nil } } @@ -151,7 +150,7 @@ func CreateRKE1Snapshot(client *rancher.Client, clusterName string) error { // CreateRKE2K3SSnapshot is a helper function to create a snapshot on an RKE2 or k3s cluster. Returns error if any. 
func CreateRKE2K3SSnapshot(client *rancher.Client, clusterName string) error { - clusterObject, clusterSteveObject, err := clusters.GetProvisioningClusterByName(client, clusterName, fleetNamespace) + clusterObject, clusterSteveObject, err := clusters.GetProvisioningClusterByName(client, clusterName, namespaces.Fleet) if err != nil { return err } @@ -175,29 +174,29 @@ func CreateRKE2K3SSnapshot(client *rancher.Client, clusterName string) error { } logrus.Infof("Creating snapshot...") - _, err = client.Steve.SteveType(clusters.ProvisioningSteveResourceType).Update(clusterSteveObject, clusterObject) + _, err = client.Steve.SteveType(stevetypes.Provisioning).Update(clusterSteveObject, clusterObject) if err != nil { return err } - err = wait.Poll(1*time.Second, defaults.FiveMinuteTimeout, func() (bool, error) { - snapshotSteveObjList, err := client.Steve.SteveType("rke.cattle.io.etcdsnapshot").List(nil) + err = wait.Poll(1*time.Second, timeouts.FiveMinute, func() (bool, error) { + snapshotSteveObjList, err := client.Steve.SteveType(stevetypes.EtcdSnapshot).List(nil) if err != nil { return false, nil } - _, clusterSteveObject, err := clusters.GetProvisioningClusterByName(client, clusterName, fleetNamespace) + _, clusterSteveObject, err := clusters.GetProvisioningClusterByName(client, clusterName, namespaces.Fleet) if err != nil { return false, nil } for _, snapshot := range snapshotSteveObjList.Data { - snapshotObj, err := client.Steve.SteveType("rke.cattle.io.etcdsnapshot").ByID(snapshot.ID) + snapshotObj, err := client.Steve.SteveType(stevetypes.EtcdSnapshot).ByID(snapshot.ID) if err != nil { return false, nil } - if snapshotObj.ObjectMeta.State.Name == active && clusterSteveObject.ObjectMeta.State.Name == active { + if snapshotObj.ObjectMeta.State.Name == states.Active && clusterSteveObject.ObjectMeta.State.Name == states.Active { logrus.Infof("All snapshots in the cluster are in an active state!") return true, nil } @@ -240,13 +239,13 @@ func 
RestoreRKE1Snapshot(client *rancher.Client, clusterName string, snapshotRes return err } - err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { + err = wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, err error) { clusterResp, err := client.Management.Cluster.ByID(clusterID) if err != nil { return false, nil } - if clusterResp.State == active { + if clusterResp.State == states.Active { return true, nil } @@ -261,7 +260,7 @@ func RestoreRKE1Snapshot(client *rancher.Client, clusterName string, snapshotRes // RestoreRKE2K3SSnapshot is a helper function to restore a snapshot on an RKE2 or k3s cluster. Returns error if any. func RestoreRKE2K3SSnapshot(client *rancher.Client, clusterName string, snapshotRestore *rkev1.ETCDSnapshotRestore, initialControlPlaneValue, initialWorkerValue string) error { - clusterObject, existingSteveAPIObject, err := clusters.GetProvisioningClusterByName(client, clusterName, fleetNamespace) + clusterObject, existingSteveAPIObject, err := clusters.GetProvisioningClusterByName(client, clusterName, namespaces.Fleet) if err != nil { return err } @@ -271,7 +270,7 @@ func RestoreRKE2K3SSnapshot(client *rancher.Client, clusterName string, snapshot clusterObject.Spec.RKEConfig.UpgradeStrategy.WorkerConcurrency = initialWorkerValue logrus.Infof("Restoring snapshot: %v", snapshotRestore.Name) - _, err = client.Steve.SteveType(ProvisioningSteveResouceType).Update(existingSteveAPIObject, clusterObject) + _, err = client.Steve.SteveType(stevetypes.Provisioning).Update(existingSteveAPIObject, clusterObject) if err != nil { return err } diff --git a/extensions/ingresses/ingresses.go b/extensions/ingresses/ingresses.go index 9714f50b..e2073199 100644 --- a/extensions/ingresses/ingresses.go +++ b/extensions/ingresses/ingresses.go @@ -11,7 +11,8 @@ import ( "github.com/pkg/errors" 
"github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/workloads/pods" "github.com/sirupsen/logrus" networking "k8s.io/api/networking/v1" @@ -20,7 +21,6 @@ import ( const ( IngressSteveType = "networking.k8s.io.ingress" - pod = "pod" IngressNginx = "ingress-nginx" RancherWebhook = "rancher-webhook" ) @@ -76,8 +76,8 @@ func IsIngressExternallyAccessible(client *rancher.Client, hostname string, path // CreateIngress will create an Ingress object in the downstream cluster. func CreateIngress(client *v1.Client, ingressName string, ingressTemplate networking.Ingress) (*v1.SteveAPIObject, error) { - podClient := client.SteveType(pod) - err := kwait.PollUntilContextTimeout(context.TODO(), 15*time.Second, defaults.FiveMinuteTimeout, true, func(context.Context) (done bool, err error) { + podClient := client.SteveType(stevetypes.Pod) + err := kwait.PollUntilContextTimeout(context.TODO(), 15*time.Second, timeouts.FiveMinute, true, func(context.Context) (done bool, err error) { newPods, err := podClient.List(nil) if err != nil { return false, nil diff --git a/extensions/kubeapi/configmaps/configmaps.go b/extensions/kubeapi/configmaps/configmaps.go index 18244b8d..fd492ae6 100644 --- a/extensions/kubeapi/configmaps/configmaps.go +++ b/extensions/kubeapi/configmaps/configmaps.go @@ -4,26 +4,19 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + defaultAnnotations "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" coreV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/apimachinery/pkg/runtime/schema" ) -// ConfigMapGroupVersionResource is the required Group Version Resource for accessing config maps in a cluster, -// using the dynamic client. -var ConfigMapGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "configmaps", -} - // CreateConfigMap is a helper function that uses the dynamic client to create a config map on a namespace for a specific cluster. // It registers a delete fuction. func CreateConfigMap(client *rancher.Client, clusterName, configMapName, description, namespace string, data, labels, annotations map[string]string) (*coreV1.ConfigMap, error) { // ConfigMap object for a namespace in a cluster - annotations["field.cattle.io/description"] = description + annotations[defaultAnnotations.Description] = description configMap := &coreV1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: configMapName, @@ -39,7 +32,7 @@ func CreateConfigMap(client *rancher.Client, clusterName, configMapName, descrip return nil, err } - configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + configMapResource := dynamicClient.Resource(groupversionresources.ConfigMap()).Namespace(namespace) unstructuredResp, err := configMapResource.Create(context.TODO(), unstructured.MustToUnstructured(configMap), metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/customresourcedefinitions/customresourcedefinitions.go b/extensions/kubeapi/customresourcedefinitions/customresourcedefinitions.go index d24233b7..86f4e36e 100644 --- a/extensions/kubeapi/customresourcedefinitions/customresourcedefinitions.go +++ b/extensions/kubeapi/customresourcedefinitions/customresourcedefinitions.go @@ -4,15 +4,8 @@ import ( "strings" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" ) -var CustomResourceDefinitions = schema.GroupVersionResource{ - Group: "apiextensions.k8s.io", - Version: "v1", - Resource: 
"customresourcedefinitions", -} - // gets a list of names of custom resource definitions that contain the input string name from an Unstructured List func GetCustomResourceDefinitionsListByName(CRDList *unstructured.UnstructuredList, name string) []string { var CRDNameList []string diff --git a/extensions/kubeapi/customresourcedefinitions/delete.go b/extensions/kubeapi/customresourcedefinitions/delete.go index 096490fa..8d3592e7 100644 --- a/extensions/kubeapi/customresourcedefinitions/delete.go +++ b/extensions/kubeapi/customresourcedefinitions/delete.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -15,7 +16,7 @@ func DeleteCustomResourceDefinition(client *rancher.Client, clusterID string, na return err } - customResourceDefinitionResource := dynamicClient.Resource(CustomResourceDefinitions).Namespace(namespace) + customResourceDefinitionResource := dynamicClient.Resource(groupversionresources.CustomResourceDefinition()).Namespace(namespace) err = customResourceDefinitionResource.Delete(context.TODO(), name, metav1.DeleteOptions{}) @@ -29,7 +30,7 @@ func BatchDeleteCustomResourceDefinition(client *rancher.Client, clusterID strin return err } - customResourceDefinitionResource := dynamicClient.Resource(CustomResourceDefinitions).Namespace(namespace) + customResourceDefinitionResource := dynamicClient.Resource(groupversionresources.CustomResourceDefinition()).Namespace(namespace) var errs error for _, crd := range list { diff --git a/extensions/kubeapi/customresourcedefinitions/list.go b/extensions/kubeapi/customresourcedefinitions/list.go index 968fcaca..69070211 100644 --- a/extensions/kubeapi/customresourcedefinitions/list.go +++ b/extensions/kubeapi/customresourcedefinitions/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + 
"github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -15,7 +16,7 @@ func ListCustomResourceDefinitions(client *rancher.Client, clusterID string, nam return nil, err } - customResourceDefinitionResource := dynamicClient.Resource(CustomResourceDefinitions).Namespace(namespace) + customResourceDefinitionResource := dynamicClient.Resource(groupversionresources.CustomResourceDefinition()).Namespace(namespace) CRDs, err := customResourceDefinitionResource.List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err diff --git a/extensions/kubeapi/ingresses/create.go b/extensions/kubeapi/ingresses/create.go index 987c49ce..355ca065 100644 --- a/extensions/kubeapi/ingresses/create.go +++ b/extensions/kubeapi/ingresses/create.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" networkingv1 "k8s.io/api/networking/v1" @@ -25,7 +26,7 @@ func CreateIngress(client *rancher.Client, clusterID, ingressName, namespace str Spec: *ingressSpec, } - ingressResource := dynamicClient.Resource(IngressesGroupVersionResource).Namespace(namespace) + ingressResource := dynamicClient.Resource(groupversionresources.Ingress()).Namespace(namespace) unstructuredResp, err := ingressResource.Create(context.TODO(), unstructured.MustToUnstructured(ingress), metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/ingresses/ingresses.go b/extensions/kubeapi/ingresses/ingresses.go index bc3d832b..7990b447 100644 --- a/extensions/kubeapi/ingresses/ingresses.go +++ b/extensions/kubeapi/ingresses/ingresses.go @@ -4,17 +4,8 @@ import ( "github.com/rancher/shepherd/clients/rancher" networkingv1 "k8s.io/api/networking/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// IngressesGroupVersionResource is the required Group Version Resource for accessing ingresses in a cluster, -// using the dynamic client. -var IngressesGroupVersionResource = schema.GroupVersionResource{ - Group: "networking.k8s.io", - Version: "v1", - Resource: "ingresses", -} - // GetIngressByName is a helper function that returns the ingress by name in a specific cluster, uses ListIngresses to get the ingress. func GetIngressByName(client *rancher.Client, clusterID, namespaceName, ingressName string) (*networkingv1.Ingress, error) { var ingress *networkingv1.Ingress diff --git a/extensions/kubeapi/ingresses/list.go b/extensions/kubeapi/ingresses/list.go index bbbbae9e..08bee8d9 100644 --- a/extensions/kubeapi/ingresses/list.go +++ b/extensions/kubeapi/ingresses/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ func ListIngresses(client *rancher.Client, clusterID, namespace string, listOpts return nil, err } - ingressResource := dynamicClient.Resource(IngressesGroupVersionResource).Namespace(namespace) + ingressResource := dynamicClient.Resource(groupversionresources.Ingress()).Namespace(namespace) ingresses, err := ingressResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/namespaces/create.go b/extensions/kubeapi/namespaces/create.go index a4f484cf..71d32454 100644 --- a/extensions/kubeapi/namespaces/create.go +++ b/extensions/kubeapi/namespaces/create.go @@ -5,7 +5,9 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/defaults" + defaultAnnotations 
"github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" "github.com/rancher/shepherd/pkg/wait" @@ -25,12 +27,12 @@ func CreateNamespace(client *rancher.Client, clusterID, projectName, namespaceNa } if containerDefaultResourceLimit != "" { - annotations["field.cattle.io/containerDefaultResourceLimit"] = containerDefaultResourceLimit + annotations[defaultAnnotations.ContainerResourceLimit] = containerDefaultResourceLimit } if projectName != "" { annotationValue := clusterID + ":" + projectName - annotations["field.cattle.io/projectId"] = annotationValue + annotations[defaultAnnotations.ProjectId] = annotationValue } namespace := &coreV1.Namespace{ @@ -56,7 +58,7 @@ func CreateNamespace(client *rancher.Client, clusterID, projectName, namespaceNa return nil, err } - namespaceResource := dynamicClient.Resource(NamespaceGroupVersionResource).Namespace("") + namespaceResource := dynamicClient.Resource(groupversionresources.Namespace()).Namespace("") unstructuredResp, err := namespaceResource.Create(context.TODO(), unstructured.MustToUnstructured(namespace), metav1.CreateOptions{}) if err != nil { @@ -67,7 +69,7 @@ func CreateNamespace(client *rancher.Client, clusterID, projectName, namespaceNa clusterRoleWatch, err := clusterRoleResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + fmt.Sprintf("%s-namespaces-edit", projectName), - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { @@ -105,10 +107,10 @@ func CreateNamespace(client *rancher.Client, clusterID, projectName, namespaceNa return err } - adminNamespaceResource := adminDynamicClient.Resource(NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := 
adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + unstructuredResp.GetName(), - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { diff --git a/extensions/kubeapi/namespaces/list.go b/extensions/kubeapi/namespaces/list.go index dc04d818..f899b2d3 100644 --- a/extensions/kubeapi/namespaces/list.go +++ b/extensions/kubeapi/namespaces/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ func ListNamespaces(client *rancher.Client, clusterID string, listOpts metav1.Li return nil, err } - namespaceResource := dynamicClient.Resource(NamespaceGroupVersionResource).Namespace("") + namespaceResource := dynamicClient.Resource(groupversionresources.Namespace()).Namespace("") namespaces, err := namespaceResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/namespaces/namespaces.go b/extensions/kubeapi/namespaces/namespaces.go index 3a8e75d5..ef904434 100644 --- a/extensions/kubeapi/namespaces/namespaces.go +++ b/extensions/kubeapi/namespaces/namespaces.go @@ -5,20 +5,12 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// NamespaceGroupVersionResource is the required Group Version Resource for accessing namespaces in a cluster, -// using the dynamic client. 
-var NamespaceGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "namespaces", -} - // ContainerDefaultResourceLimit sets the container default resource limit in a string // limitsCPU and requestsCPU in form of "3m" // limitsMemory and requestsMemory in the form of "3Mi" @@ -37,7 +29,7 @@ func GetNamespaceByName(client *rancher.Client, clusterID, namespaceName string) return nil, err } - namespaceResource := dynamicClient.Resource(NamespaceGroupVersionResource).Namespace("") + namespaceResource := dynamicClient.Resource(groupversionresources.Namespace()).Namespace("") unstructuredNamespace, err := namespaceResource.Get(context.TODO(), namespaceName, metav1.GetOptions{}) if err != nil { return nil, err diff --git a/extensions/kubeapi/nodes/nodes.go b/extensions/kubeapi/nodes/nodes.go index 494914c5..ccd34391 100644 --- a/extensions/kubeapi/nodes/nodes.go +++ b/extensions/kubeapi/nodes/nodes.go @@ -4,20 +4,12 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// NodeGroupVersionResource is the required Group Version Resource for accessing nodes in a cluster, -// using the dynamic client. 
-var NodeGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "nodes", -} - // GetNodes returns nodes with metav1.TypeMeta, metav1.ObjectMeta, NodeSpec, and NodeStatus to be used to gather more information from nodes func GetNodes(client *rancher.Client, clusterID string, listOpts metav1.ListOptions) ([]corev1.Node, error) { var nodesList []corev1.Node @@ -27,7 +19,7 @@ func GetNodes(client *rancher.Client, clusterID string, listOpts metav1.ListOpti return nil, err } - nodeResource := dynamicClient.Resource(NodeGroupVersionResource) + nodeResource := dynamicClient.Resource(groupversionresources.Node()) nodes, err := nodeResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/projects/create.go b/extensions/kubeapi/projects/create.go index 7f658df0..364cf1a5 100644 --- a/extensions/kubeapi/projects/create.go +++ b/extensions/kubeapi/projects/create.go @@ -5,6 +5,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,7 +18,7 @@ func CreateProject(client *rancher.Client, project *v3.Project) (*v3.Project, er return nil, err } - projectResource := dynamicClient.Resource(ProjectGroupVersionResource).Namespace(project.Namespace) + projectResource := dynamicClient.Resource(groupversionresources.Project()).Namespace(project.Namespace) unstructuredResp, err := projectResource.Create(context.TODO(), unstructured.MustToUnstructured(project), metav1.CreateOptions{}) if err != nil { return nil, err diff --git a/extensions/kubeapi/projects/delete.go b/extensions/kubeapi/projects/delete.go index 13a704eb..8faf254f 100644 --- a/extensions/kubeapi/projects/delete.go +++ 
b/extensions/kubeapi/projects/delete.go @@ -4,7 +4,8 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/timeouts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kwait "k8s.io/apimachinery/pkg/util/wait" ) @@ -16,14 +17,14 @@ func DeleteProject(client *rancher.Client, projectNamespace string, projectName return err } - projectResource := dynamicClient.Resource(ProjectGroupVersionResource).Namespace(projectNamespace) + projectResource := dynamicClient.Resource(groupversionresources.Project()).Namespace(projectNamespace) err = projectResource.Delete(context.TODO(), projectName, metav1.DeleteOptions{}) if err != nil { return err } - err = kwait.Poll(defaults.FiveHundredMillisecondTimeout, defaults.TenSecondTimeout, func() (done bool, err error) { + err = kwait.Poll(timeouts.FiveHundredMillisecond, timeouts.TenSecond, func() (done bool, err error) { projectList, err := ListProjects(client, projectNamespace, metav1.ListOptions{ FieldSelector: "metadata.name=" + projectName, }) diff --git a/extensions/kubeapi/projects/list.go b/extensions/kubeapi/projects/list.go index df20de39..1d36646b 100644 --- a/extensions/kubeapi/projects/list.go +++ b/extensions/kubeapi/projects/list.go @@ -5,6 +5,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -16,7 +17,7 @@ func ListProjects(client *rancher.Client, namespace string, listOpt metav1.ListO return nil, err } - unstructuredList, err := dynamicClient.Resource(ProjectGroupVersionResource).Namespace(namespace).List(context.Background(), listOpt) + unstructuredList, err := 
dynamicClient.Resource(groupversionresources.Project()).Namespace(namespace).List(context.Background(), listOpt) if err != nil { return nil, err } diff --git a/extensions/kubeapi/projects/projects.go b/extensions/kubeapi/projects/projects.go index ec673148..3084b090 100644 --- a/extensions/kubeapi/projects/projects.go +++ b/extensions/kubeapi/projects/projects.go @@ -1,24 +1,12 @@ package projects -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - const ( - Admin = "admin" - StandardUser = "user" - DefaultNamespace = "fleet-default" - RancherNamespace = "cattle-system" - LocalCluster = "local" - Projects = "projects" - ProjectIDAnnotation = "field.cattle.io/projectId" - GroupName = "management.cattle.io" - Version = "v3" + Admin = "admin" + StandardUser = "user" + DefaultNamespace = "fleet-default" + RancherNamespace = "cattle-system" + LocalCluster = "local" + Projects = "projects" + GroupName = "management.cattle.io" + Version = "v3" ) - -// ProjectGroupVersionResource is the required Group Version Resource for accessing projects in a cluster, using the dynamic client. 
-var ProjectGroupVersionResource = schema.GroupVersionResource{ - Group: GroupName, - Version: Version, - Resource: Projects, -} diff --git a/extensions/kubeapi/projects/update.go b/extensions/kubeapi/projects/update.go index 5da64e69..e45f92e5 100644 --- a/extensions/kubeapi/projects/update.go +++ b/extensions/kubeapi/projects/update.go @@ -5,6 +5,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,7 +18,7 @@ func UpdateProject(client *rancher.Client, existingProject *v3.Project, updatedP return nil, err } - projectResource := dynamicClient.Resource(ProjectGroupVersionResource).Namespace(existingProject.Namespace) + projectResource := dynamicClient.Resource(groupversionresources.Project()).Namespace(existingProject.Namespace) projectUnstructured, err := projectResource.Get(context.TODO(), existingProject.Name, metav1.GetOptions{}) if err != nil { diff --git a/extensions/kubeapi/rbac/create.go b/extensions/kubeapi/rbac/create.go index c6c2b690..396e40b1 100644 --- a/extensions/kubeapi/rbac/create.go +++ b/extensions/kubeapi/rbac/create.go @@ -5,6 +5,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" rbacv1 "k8s.io/api/rbac/v1" @@ -18,7 +19,7 @@ func CreateRole(client *rancher.Client, clusterName string, role *rbacv1.Role) ( return nil, err } - roleResource := dynamicClient.Resource(RoleGroupVersionResource).Namespace(role.Namespace) + roleResource := 
dynamicClient.Resource(groupversionresources.Role()).Namespace(role.Namespace) unstructuredResp, err := roleResource.Create(context.Background(), unstructured.MustToUnstructured(role), metav1.CreateOptions{}) if err != nil { @@ -54,7 +55,7 @@ func CreateRoleBinding(client *rancher.Client, clusterName, roleBindingName, nam }, } - roleBindingResource := dynamicClient.Resource(RoleBindingGroupVersionResource).Namespace(namespace) + roleBindingResource := dynamicClient.Resource(groupversionresources.RoleBinding()).Namespace(namespace) unstructuredResp, err := roleBindingResource.Create(context.Background(), unstructured.MustToUnstructured(roleBinding), metav1.CreateOptions{}) if err != nil { @@ -77,7 +78,7 @@ func CreateGlobalRole(client *rancher.Client, globalRole *v3.GlobalRole) (*v3.Gl return nil, err } - globalRoleResource := dynamicClient.Resource(GlobalRoleGroupVersionResource) + globalRoleResource := dynamicClient.Resource(groupversionresources.GlobalRole()) unstructuredResp, err := globalRoleResource.Create(context.TODO(), unstructured.MustToUnstructured(globalRole), metav1.CreateOptions{}) if err != nil { return nil, err @@ -99,7 +100,7 @@ func CreateGlobalRoleBinding(client *rancher.Client, globalRoleBinding *v3.Globa return nil, err } - globalRoleBindingResource := dynamicClient.Resource(GlobalRoleBindingGroupVersionResource) + globalRoleBindingResource := dynamicClient.Resource(groupversionresources.GlobalRoleBinding()) unstructuredResp, err := globalRoleBindingResource.Create(context.TODO(), unstructured.MustToUnstructured(globalRoleBinding), metav1.CreateOptions{}) if err != nil { return nil, err @@ -121,7 +122,7 @@ func CreateRoleTemplate(client *rancher.Client, roleTemplate *v3.RoleTemplate) ( return nil, err } - roleTemplateResource := dynamicClient.Resource(RoleTemplateGroupVersionResource) + roleTemplateResource := dynamicClient.Resource(groupversionresources.RoleTemplate()) unstructuredResp, err := roleTemplateResource.Create(context.Background(), 
unstructured.MustToUnstructured(roleTemplate), metav1.CreateOptions{}) if err != nil { return nil, err @@ -143,7 +144,7 @@ func CreateProjectRoleTemplateBinding(client *rancher.Client, prtb *v3.ProjectRo return nil, err } - projectRoleTemplateBindingResource := dynamicClient.Resource(ProjectRoleTemplateBindingGroupVersionResource).Namespace(prtb.Namespace) + projectRoleTemplateBindingResource := dynamicClient.Resource(groupversionresources.ProjectRoleTemplateBinding()).Namespace(prtb.Namespace) unstructuredResp, err := projectRoleTemplateBindingResource.Create(context.TODO(), unstructured.MustToUnstructured(prtb), metav1.CreateOptions{}) if err != nil { return nil, err @@ -166,7 +167,7 @@ func CreateClusterRoleTemplateBinding(client *rancher.Client, crtb *v3.ClusterRo return nil, err } - clusterRoleTemplateBindingResource := dynamicClient.Resource(ClusterRoleTemplateBindingGroupVersionResource).Namespace(crtb.Namespace) + clusterRoleTemplateBindingResource := dynamicClient.Resource(groupversionresources.ClusterRoleTemplateBinding()).Namespace(crtb.Namespace) unstructuredResp, err := clusterRoleTemplateBindingResource.Create(context.Background(), unstructured.MustToUnstructured(crtb), metav1.CreateOptions{}) if err != nil { return nil, err diff --git a/extensions/kubeapi/rbac/delete.go b/extensions/kubeapi/rbac/delete.go index 0f2e1651..a8b8b1a6 100644 --- a/extensions/kubeapi/rbac/delete.go +++ b/extensions/kubeapi/rbac/delete.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -14,7 +15,7 @@ func DeleteGlobalRoleBinding(client *rancher.Client, globalRoleBindingName strin return err } - globalRoleBindingResource := dynamicClient.Resource(GlobalRoleBindingGroupVersionResource) + globalRoleBindingResource := dynamicClient.Resource(groupversionresources.GlobalRoleBinding()) err = 
globalRoleBindingResource.Delete(context.TODO(), globalRoleBindingName, metav1.DeleteOptions{}) if err != nil { @@ -30,7 +31,7 @@ func DeleteGlobalRole(client *rancher.Client, globalRoleName string) error { return err } - globalRoleResource := dynamicClient.Resource(GlobalRoleGroupVersionResource) + globalRoleResource := dynamicClient.Resource(groupversionresources.GlobalRole()) err = globalRoleResource.Delete(context.TODO(), globalRoleName, metav1.DeleteOptions{}) if err != nil { @@ -46,7 +47,7 @@ func DeleteRoletemplate(client *rancher.Client, roleName string) error { return err } - roleResource := dynamicClient.Resource(RoleTemplateGroupVersionResource) + roleResource := dynamicClient.Resource(groupversionresources.RoleTemplate()) err = roleResource.Delete(context.TODO(), roleName, metav1.DeleteOptions{}) if err != nil { diff --git a/extensions/kubeapi/rbac/list.go b/extensions/kubeapi/rbac/list.go index 269d4404..2e21bf30 100644 --- a/extensions/kubeapi/rbac/list.go +++ b/extensions/kubeapi/rbac/list.go @@ -5,6 +5,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,7 +18,7 @@ func ListRoleBindings(client *rancher.Client, clusterName, namespace string, lis return nil, err } - unstructuredList, err := dynamicClient.Resource(RoleBindingGroupVersionResource).Namespace(namespace).List(context.Background(), listOpt) + unstructuredList, err := dynamicClient.Resource(groupversionresources.RoleBinding()).Namespace(namespace).List(context.Background(), listOpt) if err != nil { return nil, err } @@ -43,7 +44,7 @@ func ListClusterRoleBindings(client *rancher.Client, clusterName string, listOpt return nil, err } - unstructuredList, err := 
dynamicClient.Resource(ClusterRoleBindingGroupVersionResource).Namespace("").List(context.Background(), listOpt) + unstructuredList, err := dynamicClient.Resource(groupversionresources.ClusterRoleBinding()).Namespace("").List(context.Background(), listOpt) if err != nil { return nil, err } @@ -69,7 +70,7 @@ func ListGlobalRoleBindings(client *rancher.Client, listOpt metav1.ListOptions) return nil, err } - unstructuredList, err := dynamicClient.Resource(GlobalRoleBindingGroupVersionResource).List(context.TODO(), listOpt) + unstructuredList, err := dynamicClient.Resource(groupversionresources.GlobalRoleBinding()).List(context.TODO(), listOpt) if err != nil { return nil, err } @@ -95,7 +96,7 @@ func ListClusterRoleTemplateBindings(client *rancher.Client, listOpt metav1.List return nil, err } - unstructuredList, err := dynamicClient.Resource(ClusterRoleTemplateBindingGroupVersionResource).Namespace("").List(context.TODO(), listOpt) + unstructuredList, err := dynamicClient.Resource(groupversionresources.ClusterRoleTemplateBinding()).Namespace("").List(context.TODO(), listOpt) if err != nil { return nil, err } @@ -121,7 +122,7 @@ func ListGlobalRoles(client *rancher.Client, listOpt metav1.ListOptions) (*v3.Gl return nil, err } - unstructuredList, err := dynamicClient.Resource(GlobalRoleGroupVersionResource).List(context.TODO(), listOpt) + unstructuredList, err := dynamicClient.Resource(groupversionresources.GlobalRole()).List(context.TODO(), listOpt) if err != nil { return nil, err } @@ -147,7 +148,7 @@ func ListRoleTemplates(client *rancher.Client, listOpt metav1.ListOptions) (*v3. 
return nil, err } - unstructuredList, err := dynamicClient.Resource(RoleTemplateGroupVersionResource).List(context.TODO(), listOpt) + unstructuredList, err := dynamicClient.Resource(groupversionresources.RoleTemplate()).List(context.TODO(), listOpt) if err != nil { return nil, err } diff --git a/extensions/kubeapi/rbac/rbac.go b/extensions/kubeapi/rbac/rbac.go index 36cd3014..23887348 100644 --- a/extensions/kubeapi/rbac/rbac.go +++ b/extensions/kubeapi/rbac/rbac.go @@ -1,75 +1,5 @@ package rbac -import ( - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - const ( - GroupName = "management.cattle.io" - Version = "v3" LocalCluster = "local" ) - -// RoleGroupVersionResource is the required Group Version Resource for accessing roles in a cluster, using the dynamic client. -var RoleGroupVersionResource = schema.GroupVersionResource{ - Group: rbacv1.SchemeGroupVersion.Group, - Version: rbacv1.SchemeGroupVersion.Version, - Resource: "roles", -} - -// ClusterRoleGroupVersionResource is the required Group Version Resource for accessing clusterroles in a cluster, using the dynamic client. -var ClusterRoleGroupVersionResource = schema.GroupVersionResource{ - Group: rbacv1.SchemeGroupVersion.Group, - Version: rbacv1.SchemeGroupVersion.Version, - Resource: "clusterroles", -} - -// RoleBindingGroupVersionResource is the required Group Version Resource for accessing rolebindings in a cluster, using the dynamic client. -var RoleBindingGroupVersionResource = schema.GroupVersionResource{ - Group: rbacv1.SchemeGroupVersion.Group, - Version: rbacv1.SchemeGroupVersion.Version, - Resource: "rolebindings", -} - -// ClusterRoleBindingGroupVersionResource is the required Group Version Resource for accessing clusterrolebindings in a cluster, using the dynamic client. 
-var ClusterRoleBindingGroupVersionResource = schema.GroupVersionResource{ - Group: rbacv1.SchemeGroupVersion.Group, - Version: rbacv1.SchemeGroupVersion.Version, - Resource: "clusterrolebindings", -} - -// GlobalRoleGroupVersionResource is the required Group Version Resource for accessing global roles in a rancher server, using the dynamic client. -var GlobalRoleGroupVersionResource = schema.GroupVersionResource{ - Group: GroupName, - Version: Version, - Resource: "globalroles", -} - -// GlobalRoleBindingGroupVersionResource is the required Group Version Resource for accessing clusterrolebindings in a cluster, using the dynamic client. -var GlobalRoleBindingGroupVersionResource = schema.GroupVersionResource{ - Group: GroupName, - Version: Version, - Resource: "globalrolebindings", -} - -// ClusterRoleTemplateBindingGroupVersionResource is the required Group Version Resource for accessing clusterrolebindings in a cluster, using the dynamic client. -var ClusterRoleTemplateBindingGroupVersionResource = schema.GroupVersionResource{ - Group: GroupName, - Version: Version, - Resource: "clusterroletemplatebindings", -} - -// RoleTemplateGroupVersionResource is the required Group Version Resource for accessing roletemplates in a cluster, using the dynamic client. -var RoleTemplateGroupVersionResource = schema.GroupVersionResource{ - Group: GroupName, - Version: Version, - Resource: "roletemplates", -} - -// ProjectRoleTemplateBindingGroupVersionResource is the required Group Version Resource for accessing project role template bindings in a cluster, using the dynamic client. 
-var ProjectRoleTemplateBindingGroupVersionResource = schema.GroupVersionResource{ - Group: GroupName, - Version: Version, - Resource: "projectroletemplatebindings", -} diff --git a/extensions/kubeapi/rbac/update.go b/extensions/kubeapi/rbac/update.go index f5e1c587..f2afd982 100644 --- a/extensions/kubeapi/rbac/update.go +++ b/extensions/kubeapi/rbac/update.go @@ -5,6 +5,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -16,7 +17,7 @@ func UpdateGlobalRole(client *rancher.Client, updatedGlobalRole *v3.GlobalRole) if err != nil { return nil, err } - globalRoleResource := dynamicClient.Resource(GlobalRoleGroupVersionResource) + globalRoleResource := dynamicClient.Resource(groupversionresources.GlobalRole()) globalRolesUnstructured, err := globalRoleResource.Get(context.TODO(), updatedGlobalRole.Name, metav1.GetOptions{}) if err != nil { return nil, err @@ -49,7 +50,7 @@ func UpdateRoleTemplate(client *rancher.Client, updatedRoleTemplate *v3.RoleTemp if err != nil { return nil, err } - roleTemplateUnstructured := dynamicClient.Resource(RoleTemplateGroupVersionResource) + roleTemplateUnstructured := dynamicClient.Resource(groupversionresources.RoleTemplate()) roleTemplate, err := roleTemplateUnstructured.Get(context.TODO(), updatedRoleTemplate.Name, metav1.GetOptions{}) if err != nil { return nil, err @@ -83,7 +84,7 @@ func UpdateClusterRoleTemplateBindings(client *rancher.Client, existingCRTB *v3. 
if err != nil { return nil, err } - crtbUnstructured := dynamicClient.Resource(ClusterRoleTemplateBindingGroupVersionResource).Namespace(existingCRTB.Namespace) + crtbUnstructured := dynamicClient.Resource(groupversionresources.ClusterRoleTemplateBinding()).Namespace(existingCRTB.Namespace) clusterRoleTemplateBinding, err := crtbUnstructured.Get(context.TODO(), existingCRTB.Name, metav1.GetOptions{}) if err != nil { return nil, err diff --git a/extensions/kubeapi/resourcequotas/list.go b/extensions/kubeapi/resourcequotas/list.go index 188eeb75..9ce31fd7 100644 --- a/extensions/kubeapi/resourcequotas/list.go +++ b/extensions/kubeapi/resourcequotas/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ func ListResourceQuotas(client *rancher.Client, clusterID string, namespace stri return nil, err } - resourceQuotaResource := dynamicClient.Resource(ResourceQuotaGroupVersionResource).Namespace(namespace) + resourceQuotaResource := dynamicClient.Resource(groupversionresources.ResourceQuota()).Namespace(namespace) quotas, err := resourceQuotaResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/resourcequotas/resourcequotas.go b/extensions/kubeapi/resourcequotas/resourcequotas.go index dca900bc..0a6edd8f 100644 --- a/extensions/kubeapi/resourcequotas/resourcequotas.go +++ b/extensions/kubeapi/resourcequotas/resourcequotas.go @@ -6,17 +6,8 @@ import ( "github.com/rancher/shepherd/clients/rancher" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// ResourceQuotaGroupVersionResource is the required Group Version Resource for accessing resource quotas in a cluster, -// using the dynamic client. 
-var ResourceQuotaGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "resourcequotas", -} - // GetResourceQuotaByName is a helper function that returns the resource quota by name in a specific cluster. func GetResourceQuotaByName(client *rancher.Client, clusterID, name string) (*corev1.ResourceQuota, error) { resourceQuotaList, err := ListResourceQuotas(client, clusterID, "", metav1.ListOptions{}) diff --git a/extensions/kubeapi/secrets/create.go b/extensions/kubeapi/secrets/create.go index 6a70d7c7..c9559767 100644 --- a/extensions/kubeapi/secrets/create.go +++ b/extensions/kubeapi/secrets/create.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" @@ -17,7 +18,8 @@ func CreateSecretForCluster(client *rancher.Client, secret *corev1.Secret, clust if err != nil { return nil, err } - secretResource := dynamicClient.Resource(SecretGroupVersionResource).Namespace(namespace) + + secretResource := dynamicClient.Resource(groupversionresources.Secret()).Namespace(namespace) return CreateSecret(secretResource, secret) } diff --git a/extensions/kubeapi/secrets/list.go b/extensions/kubeapi/secrets/list.go index 789f75f3..526b262f 100644 --- a/extensions/kubeapi/secrets/list.go +++ b/extensions/kubeapi/secrets/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ func ListSecrets(client *rancher.Client, clusterID, namespace string, listOpts m return nil, err } - secretResource := dynamicClient.Resource(SecretGroupVersionResource).Namespace(namespace) + 
secretResource := dynamicClient.Resource(groupversionresources.Secret()).Namespace(namespace) secrets, err := secretResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/secrets/patchsecrets.go b/extensions/kubeapi/secrets/patchsecrets.go index e6537016..30738382 100644 --- a/extensions/kubeapi/secrets/patchsecrets.go +++ b/extensions/kubeapi/secrets/patchsecrets.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" coreV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,7 +34,7 @@ func PatchSecret(client *rancher.Client, clusterID, secretName, namespace string return nil, err } - secretResource := dynamicClient.Resource(SecretGroupVersionResource).Namespace(namespace) + secretResource := dynamicClient.Resource(groupversionresources.Secret()).Namespace(namespace) unstructuredResp, err := secretResource.Patch(context.TODO(), secretName, patchType, []byte(patchJSONOperation), patchOpts) if err != nil { diff --git a/extensions/kubeapi/secrets/secrets.go b/extensions/kubeapi/secrets/secrets.go index 2ceae616..a82bcebd 100644 --- a/extensions/kubeapi/secrets/secrets.go +++ b/extensions/kubeapi/secrets/secrets.go @@ -3,22 +3,13 @@ package secrets import ( "context" - "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" coreV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SecretGroupVersionResource is the required Group Version Resource for accessing secrets in a cluster, -// using the dynamic client. 
-var SecretGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "secrets", -} - // GetSecretByName is a helper function that uses the dynamic client to get a specific secret on a namespace for a specific cluster. func GetSecretByName(client *rancher.Client, clusterID, namespace, secretName string, getOpts metav1.GetOptions) (*coreV1.Secret, error) { dynamicClient, err := client.GetDownStreamClusterClient(clusterID) @@ -26,7 +17,7 @@ func GetSecretByName(client *rancher.Client, clusterID, namespace, secretName st return nil, err } - secretResource := dynamicClient.Resource(SecretGroupVersionResource).Namespace(namespace) + secretResource := dynamicClient.Resource(groupversionresources.Secret()).Namespace(namespace) unstructuredResp, err := secretResource.Get(context.TODO(), secretName, getOpts) if err != nil { diff --git a/extensions/kubeapi/services/create.go b/extensions/kubeapi/services/create.go index b7b1f599..d91e0d2e 100644 --- a/extensions/kubeapi/services/create.go +++ b/extensions/kubeapi/services/create.go @@ -4,21 +4,13 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// ServiceGroupVersionResource is the required Group Version Resource for accessing services in a cluster, -// using the dynamic client. -var ServiceGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "services", -} - // CreateService is a helper function that uses the dynamic client to create a service in a namespace for a specific cluster. 
func CreateService(client *rancher.Client, clusterName, serviceName, namespace string, spec corev1.ServiceSpec) (*corev1.Service, error) { dynamicClient, err := client.GetDownStreamClusterClient(clusterName) @@ -34,7 +26,7 @@ func CreateService(client *rancher.Client, clusterName, serviceName, namespace s Spec: spec, } - serviceResource := dynamicClient.Resource(ServiceGroupVersionResource).Namespace(namespace) + serviceResource := dynamicClient.Resource(groupversionresources.Service()).Namespace(namespace) unstructuredResp, err := serviceResource.Create(context.TODO(), unstructured.MustToUnstructured(service), metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/storageclasses/awsebs/create.go b/extensions/kubeapi/storageclasses/awsebs/create.go index 153c5940..f3910a51 100644 --- a/extensions/kubeapi/storageclasses/awsebs/create.go +++ b/extensions/kubeapi/storageclasses/awsebs/create.go @@ -5,7 +5,7 @@ import ( "strconv" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/kubeapi/storageclasses" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" storagev1 "k8s.io/api/storage/v1" @@ -39,7 +39,7 @@ func CreateAWSEBSStorageClass(client *rancher.Client, clusterName, fsType, encry return nil, err } - storageClassVolumesResource := dynamicClient.Resource(storageclasses.StorageClassGroupVersionResource).Namespace("") + storageClassVolumesResource := dynamicClient.Resource(groupversionresources.StorageClass()).Namespace("") unstructuredResp, err := storageClassVolumesResource.Create(context.TODO(), unstructuredStorageClass, metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/storageclasses/storageclasses.go b/extensions/kubeapi/storageclasses/storageclasses.go index 1e87179f..b3506038 100644 --- a/extensions/kubeapi/storageclasses/storageclasses.go +++ 
b/extensions/kubeapi/storageclasses/storageclasses.go @@ -4,17 +4,8 @@ import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// StorageClassGroupVersionResource is the required Group Version Resource for accessing storage classes in a cluster, -// using the dynamic client. -var StorageClassGroupVersionResource = schema.GroupVersionResource{ - Group: "storage.k8s.io", - Version: "v1", - Resource: "storageclasses", -} - // NewStorageClass is a constructor for a *PersistentVolume object `mountOptions` is an optional parameter and can be nil. func NewStorageClass(storageClassName, description string, mountOptions []string, reclaimPolicy corev1.PersistentVolumeReclaimPolicy, volumeBindingMode storagev1.VolumeBindingMode) *storagev1.StorageClass { annotations := map[string]string{ diff --git a/extensions/kubeapi/tokens/patchtokens.go b/extensions/kubeapi/tokens/patchtokens.go index 95c5be6c..e99e2adc 100644 --- a/extensions/kubeapi/tokens/patchtokens.go +++ b/extensions/kubeapi/tokens/patchtokens.go @@ -6,19 +6,13 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ) -var TokenGroupVersionResource = schema.GroupVersionResource{ - Group: "management.cattle.io", - Version: "v3", - Resource: "tokens", -} - // PatchToken is a helper function that uses the dynamic client to patch a token by its name. // Different token operations are supported: add, replace, remove. 
func PatchToken(client *rancher.Client, clusterID, tokenName, patchOp, patchPath, patchData string) (*v3.Token, *unstructured.Unstructured, error) { @@ -27,7 +21,7 @@ func PatchToken(client *rancher.Client, clusterID, tokenName, patchOp, patchPath return nil, nil, err } - tokenResource := dynamicClient.Resource(TokenGroupVersionResource) + tokenResource := dynamicClient.Resource(groupversionresources.Token()) patchJSONOperation := fmt.Sprintf(` [ diff --git a/extensions/kubeapi/volumes/persistentvolumeclaims/persistentvolumeclaims.go b/extensions/kubeapi/volumes/persistentvolumeclaims/persistentvolumeclaims.go index 0b3c3a69..197cee46 100644 --- a/extensions/kubeapi/volumes/persistentvolumeclaims/persistentvolumeclaims.go +++ b/extensions/kubeapi/volumes/persistentvolumeclaims/persistentvolumeclaims.go @@ -5,13 +5,14 @@ import ( "strconv" "github.com/rancher/shepherd/clients/rancher" + defaultAnnotations "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1Unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" ) const ( @@ -24,14 +25,6 @@ const ( AccessModeReadOnlyMany = "ReadOnlyMany" ) -// PersistentVolumeClaimGroupVersionResource is the required Group Version Resource for accessing persistent -// volume claims in a cluster, using the dynamic client. -var PersistentVolumeClaimGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "persistentvolumeclaims", -} - // CreatePersistentVolumeClaim is a helper function that uses the dynamic client to create a persistent // volume claim on a namespace for a specific cluster. 
// If you pass a PersistentVolume then `storageClass` and `storage` would be optional, otherwise `persistentVolume` @@ -41,7 +34,7 @@ func CreatePersistentVolumeClaim(client *rancher.Client, clusterName, persistent var unstructuredVolumeClaim *metav1Unstructured.Unstructured annotations := map[string]string{ - "field.cattle.io/description": description, + defaultAnnotations.Description: description, } persistentVolumeClaim := &corev1.PersistentVolumeClaim{ @@ -84,7 +77,7 @@ func CreatePersistentVolumeClaim(client *rancher.Client, clusterName, persistent return nil, err } - PersistentVolumeClaimResource := dynamicClient.Resource(PersistentVolumeClaimGroupVersionResource).Namespace(namespace) + PersistentVolumeClaimResource := dynamicClient.Resource(groupversionresources.PersistentVolumeClaim()).Namespace(namespace) unstructuredResp, err := PersistentVolumeClaimResource.Create(context.TODO(), unstructuredVolumeClaim, metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/volumes/persistentvolumes/awsebs/create.go b/extensions/kubeapi/volumes/persistentvolumes/awsebs/create.go index f8fa1543..1bb1e4f1 100644 --- a/extensions/kubeapi/volumes/persistentvolumes/awsebs/create.go +++ b/extensions/kubeapi/volumes/persistentvolumes/awsebs/create.go @@ -5,7 +5,7 @@ import ( "strconv" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/kubeapi/volumes/persistentvolumes" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" @@ -37,7 +37,7 @@ func CreateAWSEBSPersistentVolume(client *rancher.Client, clusterName, fsType, v return nil, err } - persistentVolumesResource := dynamicClient.Resource(persistentvolumes.PersistentVolumesGroupVersionResource).Namespace("") + persistentVolumesResource := dynamicClient.Resource(groupversionresources.PersistentVolume()).Namespace("") unstructuredResp, err := 
persistentVolumesResource.Create(context.TODO(), unstructuredPersistentVolume, metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/volumes/persistentvolumes/persistentvolumes.go b/extensions/kubeapi/volumes/persistentvolumes/persistentvolumes.go index df185d5b..e58ec4cf 100644 --- a/extensions/kubeapi/volumes/persistentvolumes/persistentvolumes.go +++ b/extensions/kubeapi/volumes/persistentvolumes/persistentvolumes.go @@ -1,25 +1,17 @@ package persistentvolumes import ( + defaultAnnotations "github.com/rancher/shepherd/extensions/defaults/annotations" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// PersistentVolumesGroupVersionResource is the required Group Version Resource for accessing persistent volumes in a cluster, -// using the dynamic client. -var PersistentVolumesGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "persistentvolumes", -} - // NewPersistentVolume is a constructor for a *PersistentVolume object // It registers a delete fuction. 
`nodeSelectorRequirement`, `mountOptions`, `storageClass` are optional parameters if those are not needed pass nil for them will suffice func NewPersistentVolume(volumeName, description string, accessModes []corev1.PersistentVolumeAccessMode, nodeSelectorRequirement []corev1.NodeSelectorRequirement, mountOptions []string, storageClass *storagev1.StorageClass) *corev1.PersistentVolume { annotations := map[string]string{ - "field.cattle.io/description": description, + defaultAnnotations.Description: description, } persistentVolume := &corev1.PersistentVolume{ diff --git a/extensions/kubeapi/workloads/cronjobs/create.go b/extensions/kubeapi/workloads/cronjobs/create.go index d02606bd..f0d5ca1b 100644 --- a/extensions/kubeapi/workloads/cronjobs/create.go +++ b/extensions/kubeapi/workloads/cronjobs/create.go @@ -4,7 +4,8 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" "github.com/rancher/shepherd/pkg/wait" @@ -13,18 +14,9 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" ) -// CronJobGroupVersionResource is the required Group Version Resource for accessing cron jobs in a cluster, -// using the dynamic client. -var CronJobGroupVersionResource = schema.GroupVersionResource{ - Group: "batch", - Version: "v1beta1", - Resource: "cronjobs", -} - // CreateCronJob is a helper function that uses the dynamic client to create a cronjob on a namespace for a specific cluster. // It registers a delete fuction a wait.WatchWait to ensure the cronjob is deleted cleanly. 
func CreateCronJob(client *rancher.Client, clusterName, cronJobName, namespace, schedule string, template corev1.PodTemplateSpec) (*v1beta1.CronJob, error) { @@ -49,7 +41,7 @@ func CreateCronJob(client *rancher.Client, clusterName, cronJobName, namespace, }, } - cronJobResource := dynamicClient.Resource(CronJobGroupVersionResource).Namespace(namespace) + cronJobResource := dynamicClient.Resource(groupversionresources.CronJob()).Namespace(namespace) unstructuredResp, err := cronJobResource.Create(context.TODO(), unstructured.MustToUnstructured(cronJob), metav1.CreateOptions{}) if err != nil { @@ -67,7 +59,7 @@ func CreateCronJob(client *rancher.Client, clusterName, cronJobName, namespace, watchInterface, err := cronJobResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + unstructuredResp.GetName(), - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { diff --git a/extensions/kubeapi/workloads/daemonsets/create.go b/extensions/kubeapi/workloads/daemonsets/create.go index 18db7471..62e89753 100644 --- a/extensions/kubeapi/workloads/daemonsets/create.go +++ b/extensions/kubeapi/workloads/daemonsets/create.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" + defaultlabels "github.com/rancher/shepherd/extensions/defaults/labels" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" appv1 "k8s.io/api/apps/v1" @@ -20,7 +22,7 @@ func CreateDaemonSet(client *rancher.Client, clusterName, daemonSetName, namespa } labels := map[string]string{} - labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.daemonset-%v-%v", namespace, daemonSetName) + labels[defaultlabels.WorkloadSelector] = fmt.Sprintf("apps.daemonset-%v-%v", namespace, daemonSetName) template.ObjectMeta = metav1.ObjectMeta{ Labels: labels, 
@@ -40,7 +42,7 @@ func CreateDaemonSet(client *rancher.Client, clusterName, daemonSetName, namespa }, } - daemonSetResource := dynamicClient.Resource(DaemonSetGroupVersionResource).Namespace(namespace) + daemonSetResource := dynamicClient.Resource(groupversionresources.Daemonset()).Namespace(namespace) unstructuredResp, err := daemonSetResource.Create(context.TODO(), unstructured.MustToUnstructured(daemonSet), metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/workloads/daemonsets/daemonsets.go b/extensions/kubeapi/workloads/daemonsets/daemonsets.go index b1717c98..8eb8a678 100644 --- a/extensions/kubeapi/workloads/daemonsets/daemonsets.go +++ b/extensions/kubeapi/workloads/daemonsets/daemonsets.go @@ -4,20 +4,12 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" appv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// DaemonSetGroupVersionResource is the required Group Version Resource for accessing daemon sets in a cluster, -// using the dynamic client. -var DaemonSetGroupVersionResource = schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "daemonsets", -} - // GetDaemonsetByName is a helper function that uses the dynamic client to get a specific daemonset on a namespace for a specific cluster. 
func GetDaemonsetByName(client *rancher.Client, clusterID, namespace, daemonsetName string) (*appv1.DaemonSet, error) { dynamicClient, err := client.GetDownStreamClusterClient(clusterID) @@ -25,7 +17,7 @@ func GetDaemonsetByName(client *rancher.Client, clusterID, namespace, daemonsetN return nil, err } - daemonsetResource := dynamicClient.Resource(DaemonSetGroupVersionResource).Namespace(namespace) + daemonsetResource := dynamicClient.Resource(groupversionresources.Daemonset()).Namespace(namespace) unstructuredResp, err := daemonsetResource.Get(context.TODO(), daemonsetName, metav1.GetOptions{}) if err != nil { return nil, err diff --git a/extensions/kubeapi/workloads/daemonsets/list.go b/extensions/kubeapi/workloads/daemonsets/list.go index 9d34ac9f..e3c31c14 100644 --- a/extensions/kubeapi/workloads/daemonsets/list.go +++ b/extensions/kubeapi/workloads/daemonsets/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" appv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,7 +24,7 @@ func ListDaemonsets(client *rancher.Client, clusterID, namespace string, listOpt return nil, err } - daemonsetResource := dynamicClient.Resource(DaemonSetGroupVersionResource).Namespace(namespace) + daemonsetResource := dynamicClient.Resource(groupversionresources.Daemonset()).Namespace(namespace) daemonsets, err := daemonsetResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/workloads/deployments/create.go b/extensions/kubeapi/workloads/deployments/create.go index 42e3038d..86f2d270 100644 --- a/extensions/kubeapi/workloads/deployments/create.go +++ b/extensions/kubeapi/workloads/deployments/create.go @@ -5,22 +5,15 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" + defaultlabels "github.com/rancher/shepherd/extensions/defaults/labels" + 
"github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// DeploymentGroupVersionResource is the required Group Version Resource for accessing deployments in a cluster, -// using the dynamic client. -var DeploymentGroupVersionResource = schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", -} - // CreateDeployment is a helper function that uses the dynamic client to create a deployment on a namespace for a specific cluster. func CreateDeployment(client *rancher.Client, clusterName, deploymentName, namespace string, template corev1.PodTemplateSpec, replicas int32) (*appv1.Deployment, error) { dynamicClient, err := client.GetDownStreamClusterClient(clusterName) @@ -29,7 +22,7 @@ func CreateDeployment(client *rancher.Client, clusterName, deploymentName, names } labels := map[string]string{} - labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.deployment-%v-%v", namespace, deploymentName) + labels[defaultlabels.WorkloadSelector] = fmt.Sprintf("apps.deployment-%v-%v", namespace, deploymentName) template.ObjectMeta = metav1.ObjectMeta{ Labels: labels, @@ -50,7 +43,7 @@ func CreateDeployment(client *rancher.Client, clusterName, deploymentName, names }, } - deploymentResource := dynamicClient.Resource(DeploymentGroupVersionResource).Namespace(namespace) + deploymentResource := dynamicClient.Resource(groupversionresources.Deployment()).Namespace(namespace) unstructuredResp, err := deploymentResource.Create(context.TODO(), unstructured.MustToUnstructured(deployment), metav1.CreateOptions{}) if err != nil { diff --git a/extensions/kubeapi/workloads/deployments/list.go b/extensions/kubeapi/workloads/deployments/list.go index 30609f81..d8d93c6f 100644 --- 
a/extensions/kubeapi/workloads/deployments/list.go +++ b/extensions/kubeapi/workloads/deployments/list.go @@ -4,6 +4,7 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" appv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,7 +23,7 @@ func ListDeployments(client *rancher.Client, clusterID, namespace string, listOp if err != nil { return nil, err } - deploymentResource := dynamicClient.Resource(DeploymentGroupVersionResource).Namespace(namespace) + deploymentResource := dynamicClient.Resource(groupversionresources.Deployment()).Namespace(namespace) deployments, err := deploymentResource.List(context.TODO(), listOpts) if err != nil { return nil, err diff --git a/extensions/kubeapi/workloads/jobs/create.go b/extensions/kubeapi/workloads/jobs/create.go index 0a388f5b..2efef505 100644 --- a/extensions/kubeapi/workloads/jobs/create.go +++ b/extensions/kubeapi/workloads/jobs/create.go @@ -4,7 +4,8 @@ import ( "context" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/unstructured" "github.com/rancher/shepherd/pkg/api/scheme" "github.com/rancher/shepherd/pkg/wait" @@ -12,18 +13,9 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" ) -// JobGroupVersionResource is the required Group Version Resource for accessing jobs in a cluster, -// using the dynamic client. 
-var JobGroupVersionResource = schema.GroupVersionResource{ - Group: "batch", - Version: "v1", - Resource: "jobs", -} - // CreateJob is a helper function that uses the dynamic client to create a batch job on a namespace for a specific cluster. // It registers a delete fuction a wait.WatchWait to ensure the job is deleted cleanly. func CreateJob(client *rancher.Client, clusterName, jobName, namespace string, template corev1.PodTemplateSpec) (*batchv1.Job, error) { @@ -43,7 +35,7 @@ func CreateJob(client *rancher.Client, clusterName, jobName, namespace string, t }, } - jobResource := dynamicClient.Resource(JobGroupVersionResource).Namespace(namespace) + jobResource := dynamicClient.Resource(groupversionresources.Job()).Namespace(namespace) unstructuredResp, err := jobResource.Create(context.TODO(), unstructured.MustToUnstructured(job), metav1.CreateOptions{}) if err != nil { @@ -61,7 +53,7 @@ func CreateJob(client *rancher.Client, clusterName, jobName, namespace string, t watchInterface, err := jobResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + unstructuredResp.GetName(), - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { diff --git a/extensions/kubeapi/workloads/pods/podstatus.go b/extensions/kubeapi/workloads/pods/podstatus.go index 67105b60..0e46bb56 100644 --- a/extensions/kubeapi/workloads/pods/podstatus.go +++ b/extensions/kubeapi/workloads/pods/podstatus.go @@ -5,20 +5,12 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// PodGroupVersion is the required Group Version for accessing pods in a cluster, -// using the dynamic client. 
-var PodGroupVersionResource = schema.GroupVersionResource{ - Group: "", - Version: "v1", - Resource: "pods", -} - // StatusPods is a helper function that uses the dynamic client to list pods on a namespace for a specific cluster with its list options. func StatusPods(client *rancher.Client, clusterID string, listOpts metav1.ListOptions) ([]string, []error) { var podList []corev1.Pod @@ -27,7 +19,7 @@ func StatusPods(client *rancher.Client, clusterID string, listOpts metav1.ListOp if err != nil { return nil, []error{err} } - podResource := dynamicClient.Resource(PodGroupVersionResource) + podResource := dynamicClient.Resource(groupversionresources.Pod()) pods, err := podResource.List(context.TODO(), listOpts) if err != nil { return nil, []error{err} diff --git a/extensions/kubeapi/workloads/template.go b/extensions/kubeapi/workloads/template.go index 7a7cf68b..42cc2976 100644 --- a/extensions/kubeapi/workloads/template.go +++ b/extensions/kubeapi/workloads/template.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/kubeapi/secrets" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" "github.com/rancher/shepherd/pkg/api/scheme" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,7 +18,7 @@ func NewImagePullSecret(client *rancher.Client, clusterName, namespace string) ( return nil, err } - resp, err := k8sClient.Resource(secrets.SecretGroupVersionResource).Namespace(namespace).List(context.TODO(), metav1.ListOptions{}) + resp, err := k8sClient.Resource(groupversionresources.Secret()).Namespace(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err } diff --git a/extensions/kubeconfig/exec.go b/extensions/kubeconfig/exec.go index 6a6838ff..14fb5d21 100644 --- a/extensions/kubeconfig/exec.go +++ b/extensions/kubeconfig/exec.go @@ -22,8 +22,6 @@ const ( apiPath = "/api" ) -var podGroupVersion = 
corev1.SchemeGroupVersion.WithResource("pods").GroupVersion() - // LogStreamer is a struct that acts like io.Writer inorder to retrieve Stdout from a kubectl exec command in pod type LogStreamer struct { b bytes.Buffer @@ -45,6 +43,7 @@ func (l *LogStreamer) Write(p []byte) (n int, err error) { // takes the kubeconfig in form of a restclient.Config object, the pod name, the namespace of the pod, // and the command a user wants to run. func KubectlExec(restConfig *restclient.Config, podName, namespace string, command []string) (*LogStreamer, error) { + podGroupVersion := corev1.SchemeGroupVersion.WithResource("pods").GroupVersion() restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) restConfig.ContentConfig.GroupVersion = &podGroupVersion restConfig.APIPath = apiPath @@ -85,6 +84,7 @@ func KubectlExec(restConfig *restclient.Config, podName, namespace string, comma // the kubeconfig in form of a restclient.Config object, the pod name, the namespace of the pod, the filename, and then // the local destination (dest) where the file will be copied to. 
func CopyFileFromPod(restConfig *restclient.Config, clientConfig clientcmd.ClientConfig, podName, namespace, filename, dest string) error { + podGroupVersion := corev1.SchemeGroupVersion.WithResource("pods").GroupVersion() restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) restConfig.ContentConfig.GroupVersion = &podGroupVersion restConfig.APIPath = apiPath diff --git a/extensions/kubeconfig/podlogs.go b/extensions/kubeconfig/podlogs.go index 414787cb..7e510eb8 100644 --- a/extensions/kubeconfig/podlogs.go +++ b/extensions/kubeconfig/podlogs.go @@ -27,6 +27,8 @@ func GetPodLogs(client *rancher.Client, clusterID string, podName string, namesp if err != nil { return "", err } + + podGroupVersion := corev1.SchemeGroupVersion.WithResource("pods").GroupVersion() restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) restConfig.ContentConfig.GroupVersion = &podGroupVersion restConfig.APIPath = apiPath diff --git a/extensions/kubectl/command.go b/extensions/kubectl/command.go index 8f2ff86a..088a9629 100644 --- a/extensions/kubectl/command.go +++ b/extensions/kubectl/command.go @@ -10,9 +10,9 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/kubeconfig" "github.com/rancher/shepherd/extensions/workloads" - "github.com/rancher/shepherd/extensions/workloads/pods" corev1 "k8s.io/api/core/v1" ) @@ -97,7 +97,7 @@ func Command(client *rancher.Client, yamlContent *management.ImportClusterYamlIn } steveClient := client.Steve - pods, err := steveClient.SteveType(pods.PodResourceSteveType).NamespacedSteveClient(Namespace).List(nil) + pods, err := steveClient.SteveType(stevetypes.Pod).NamespacedSteveClient(Namespace).List(nil) if err != nil { return "", err } diff --git a/extensions/kubectl/template.go 
b/extensions/kubectl/template.go index a0f35f43..8a1c2f80 100644 --- a/extensions/kubectl/template.go +++ b/extensions/kubectl/template.go @@ -6,6 +6,7 @@ import ( namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/kubeconfig" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -31,8 +32,6 @@ const ( JobName = "kubectl" ) -var importTimeout = int64(60 * 2) - // CreateJobAndRunKubectlCommands is a helper to create a job and run the kubectl commands in the pods of the Job. // It then returns errors or nil from the job. func CreateJobAndRunKubectlCommands(clusterID, jobname string, job *batchv1.Job, client *rancher.Client) error { @@ -100,7 +99,7 @@ func CreateJobAndRunKubectlCommands(clusterID, jobname string, job *batchv1.Job, jobWatch, err := downClient.Resource(batchv1.SchemeGroupVersion.WithResource("jobs")).Namespace(Namespace).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", job.Name).String(), - TimeoutSeconds: &importTimeout, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.TwoMinute), }) if err != nil { return err diff --git a/extensions/machinepools/machinepools.go b/extensions/machinepools/machinepools.go index e4e74f1b..efe55ad7 100644 --- a/extensions/machinepools/machinepools.go +++ b/extensions/machinepools/machinepools.go @@ -11,7 +11,11 @@ import ( rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/labels" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + 
"github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/kubeapi/secrets" nodestat "github.com/rancher/shepherd/extensions/nodes" "github.com/sirupsen/logrus" @@ -21,15 +25,9 @@ import ( ) const ( - active = "active" - fleetNamespace = "fleet-default" - initNodeLabelKey = "rke.cattle.io/init-node" - local = "local" - machineNameSteveLabel = "rke.cattle.io/machine-name" - machinePlanSecretType = "rke.cattle.io/machine-plan" - machineSteveResourceType = "cluster.x-k8s.io.machine" - pool = "pool" - True = "true" + local = "local" + pool = "pool" + True = "true" nodeRoleListLength = 4 ) @@ -58,7 +56,7 @@ func MatchNodeRolesToMachinePool(nodeRoles NodeRoles, machinePools []apisV1.RKEM // updateMachinePoolQuantity is a helper method that will update the desired machine pool with the latest quantity. func updateMachinePoolQuantity(client *rancher.Client, cluster *v1.SteveAPIObject, nodeRoles NodeRoles) (*v1.SteveAPIObject, error) { - updateCluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(cluster.ID) + updateCluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.ID) if err != nil { return nil, err } @@ -76,24 +74,24 @@ func updateMachinePoolQuantity(client *rancher.Client, cluster *v1.SteveAPIObjec updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Quantity = &newQuantity logrus.Infof("Scaling the machine pool to %v total nodes", newQuantity) - cluster, err = client.Steve.SteveType("provisioning.cattle.io.cluster").Update(cluster, updatedCluster) + cluster, err = client.Steve.SteveType(stevetypes.Provisioning).Update(cluster, updatedCluster) if err != nil { return nil, err } - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { + err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, 
err error) { client, err = client.ReLogin() if err != nil { return false, err } - clusterResp, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(cluster.ID) + clusterResp, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.ID) if err != nil { return false, err } - if clusterResp.ObjectMeta.State.Name == active && - nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) == nil { + if clusterResp.ObjectMeta.State.Name == states.Active && + nodestat.AllMachineReady(client, cluster.ID, timeouts.ThirtyMinute) == nil { return true, nil } @@ -303,8 +301,8 @@ func MatchRoleToPool(poolRole string, allRoles []Roles) int { // object for rke2/k3s clusters func GetInitMachine(client *rancher.Client, clusterID string) (*v1.SteveAPIObject, error) { logrus.Info("Retrieving secret and identifying machine...") - secret, err := secrets.ListSecrets(client, local, fleetNamespace, metav1.ListOptions{ - LabelSelector: initNodeLabelKey + "=" + True, + secret, err := secrets.ListSecrets(client, local, namespaces.Fleet, metav1.ListOptions{ + LabelSelector: labels.InitNode + "=" + True, }) if err != nil { return nil, err @@ -312,10 +310,10 @@ func GetInitMachine(client *rancher.Client, clusterID string) (*v1.SteveAPIObjec // secret.Items[0] will never change when targeting the init node secret, // as the list has been filtered above to grab the single init node secret - initNodeMachineName := secret.Items[0].ObjectMeta.Labels[machineNameSteveLabel] + initNodeMachineName := secret.Items[0].ObjectMeta.Labels[labels.MachineName] logrus.Info("Retrieving machine...") - initMachine, err := client.Steve.SteveType(machineSteveResourceType).ByID(fleetNamespace + "/" + initNodeMachineName) + initMachine, err := client.Steve.SteveType(stevetypes.Machine).ByID(namespaces.Fleet + "/" + initNodeMachineName) if err != nil { return nil, err } diff --git a/extensions/namespaces/create.go b/extensions/namespaces/create.go index b24e7ed5..3047a38c 100644 
--- a/extensions/namespaces/create.go +++ b/extensions/namespaces/create.go @@ -9,8 +9,10 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/defaults" - "github.com/rancher/shepherd/extensions/kubeapi/namespaces" + defaultAnnotations "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/schema/groupversionresources" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/pkg/api/scheme" "github.com/rancher/shepherd/pkg/wait" coreV1 "k8s.io/api/core/v1" @@ -33,10 +35,10 @@ func CreateNamespace(client *rancher.Client, namespaceName, containerDefaultReso annotations = make(map[string]string) } if containerDefaultResourceLimit != "" { - annotations["field.cattle.io/containerDefaultResourceLimit"] = containerDefaultResourceLimit + annotations[defaultAnnotations.ContainerResourceLimit] = containerDefaultResourceLimit } if project != nil { - annotations["field.cattle.io/projectId"] = project.ID + annotations[defaultAnnotations.ProjectId] = project.ID } namespace := &coreV1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -51,7 +53,7 @@ func CreateNamespace(client *rancher.Client, namespaceName, containerDefaultReso return nil, err } - nameSpaceClient := steveClient.SteveType(NamespaceSteveType) + nameSpaceClient := steveClient.SteveType(stevetypes.Namespace) resp, err := nameSpaceClient.Create(namespace) if err != nil { @@ -73,7 +75,7 @@ func CreateNamespace(client *rancher.Client, namespaceName, containerDefaultReso clusterRoleWatch, err := clusterRoleResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + fmt.Sprintf("%s-namespaces-edit", projectID), - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + 
TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { @@ -108,7 +110,7 @@ func CreateNamespace(client *rancher.Client, namespaceName, containerDefaultReso return err } - nameSpaceClient = steveClient.SteveType(NamespaceSteveType) + nameSpaceClient = steveClient.SteveType(stevetypes.Namespace) err := nameSpaceClient.Delete(resp) if errors.IsNotFound(err) { return nil @@ -117,10 +119,10 @@ func CreateNamespace(client *rancher.Client, namespaceName, containerDefaultReso return err } - adminNamespaceResource := adminDynamicClient.Resource(namespaces.NamespaceGroupVersionResource).Namespace("") + adminNamespaceResource := adminDynamicClient.Resource(groupversionresources.Namespace()).Namespace("") watchInterface, err := adminNamespaceResource.Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + resp.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { diff --git a/extensions/nodes/node_status.go b/extensions/nodes/node_status.go index 17bfb031..b120e9a0 100644 --- a/extensions/nodes/node_status.go +++ b/extensions/nodes/node_status.go @@ -9,25 +9,20 @@ import ( rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" ) const ( - activeState = "active" - runningState = "running" - errorState = "error" - machineSteveResourceType = "cluster.x-k8s.io.machine" - machineSteveAnnotation = "cluster.x-k8s.io/machine" - fleetNamespace = "fleet-default" - etcdLabel = 
"rke.cattle.io/etcd-role" - clusterLabel = "cluster.x-k8s.io/cluster-name" - PollInterval = time.Duration(5 * time.Second) PollTimeout = time.Duration(15 * time.Minute) oneSecondInterval = time.Duration(1 * time.Second) - fiveMinuteTimeout = time.Duration(5 * time.Minute) httpNotFound = "404 Not Found" ) @@ -53,13 +48,13 @@ func AllManagementNodeReady(client *rancher.Client, ClusterID string, timeout ti return false, nil } - if node.State == errorState { + if node.State == states.Error { logrus.Warnf("node %s is in error state", node.Name) return false, nil } - if node.State != activeState { + if node.State != states.Active { return false, nil } } @@ -88,8 +83,8 @@ func AllMachineReady(client *rancher.Client, clusterID string, timeout time.Dura for _, node := range nodes.Data { machine, err := client.Steve. - SteveType(machineSteveResourceType). - ByID(fleetNamespace + "/" + node.Annotations[machineSteveAnnotation]) + SteveType(stevetypes.Machine). + ByID(namespaces.Fleet + "/" + node.Annotations[annotations.Machine]) if err != nil { return false, err } @@ -104,7 +99,7 @@ func AllMachineReady(client *rancher.Client, clusterID string, timeout time.Dura return false, nil } - if machine.State.Name != runningState { + if machine.State.Name != states.Running { return false, nil } } @@ -120,7 +115,7 @@ func AllMachineReady(client *rancher.Client, clusterID string, timeout time.Dura func AllNodeDeleted(client *rancher.Client, ClusterID string) error { ctx := context.Background() err := wait.PollUntilContextTimeout( - ctx, oneSecondInterval, fiveMinuteTimeout, true, func(ctx context.Context) (bool, error) { + ctx, oneSecondInterval, timeouts.FiveMinute, true, func(ctx context.Context) (bool, error) { nodes, err := client.Management.Node.ListAll(&types.ListOpts{ Filters: map[string]interface{}{ "clusterId": ClusterID, @@ -149,7 +144,7 @@ func IsNodeReplaced(client *rancher.Client, oldMachineID string, clusterID strin ctx := context.Background() err := 
wait.PollUntilContextTimeout( - ctx, oneSecondInterval, PollTimeout, true, func(ctx context.Context) (bool, error) { + ctx, oneSecondInterval, timeouts.FifteenMinute, true, func(ctx context.Context) (bool, error) { machines, err := client.Management.Node.ListAll(&types.ListOpts{Filters: map[string]interface{}{ "clusterId": clusterID, }}) @@ -188,7 +183,7 @@ func Isv1NodeConditionMet(client *rancher.Client, machineID, clusterID, conditio ctx := context.Background() err = wait.PollUntilContextTimeout( - ctx, PollInterval, PollTimeout, true, func(ctx context.Context) (bool, error) { + ctx, PollInterval, timeouts.FifteenMinute, true, func(ctx context.Context) (bool, error) { refreshedMachine, err := steveclient.SteveType("node").ByID(machineID) if err != nil { if strings.Contains(err.Error(), httpNotFound) { diff --git a/extensions/pipeline/setup.go b/extensions/pipeline/setup.go index a140316a..66614cbc 100644 --- a/extensions/pipeline/setup.go +++ b/extensions/pipeline/setup.go @@ -10,6 +10,7 @@ import ( "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/kubeapi/cluster" "github.com/rancher/shepherd/extensions/token" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -93,7 +94,7 @@ func UpdateEULA(adminClient *rancher.Client) error { return false, err } - urlSettingResp, err = steveClient.SteveType("management.cattle.io.setting").ByID("server-url") + urlSettingResp, err = steveClient.SteveType(stevetypes.ManagementSetting).ByID("server-url") if err != nil { serverURL = err return false, nil @@ -119,7 +120,7 @@ func UpdateEULA(adminClient *rancher.Client) error { urlSetting.Value = fmt.Sprintf("https://%s", adminClient.RancherConfig.Host) - _, err = steveClient.SteveType("management.cattle.io.setting").Update(urlSettingResp, urlSetting) + _, 
err = steveClient.SteveType(stevetypes.ManagementSetting).Update(urlSettingResp, urlSetting) if err != nil { return err } @@ -135,7 +136,7 @@ func UpdateEULA(adminClient *rancher.Client) error { var pollError error err = kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) { - _, err = steveClient.SteveType("management.cattle.io.setting").Create(settingEULA) + _, err = steveClient.SteveType(stevetypes.ManagementSetting).Create(settingEULA) if err != nil && !strings.Contains(err.Error(), "409 Conflict") { pollError = err @@ -143,7 +144,7 @@ func UpdateEULA(adminClient *rancher.Client) error { } urlSetting := &v3.Setting{} - urlSettingResp, err := steveClient.SteveType("management.cattle.io.setting").ByID("server-url") + urlSettingResp, err := steveClient.SteveType(stevetypes.ManagementSetting).ByID("server-url") if err != nil { return false, err } diff --git a/extensions/provisioning/creates.go b/extensions/provisioning/creates.go index 72fb3912..e2246681 100644 --- a/extensions/provisioning/creates.go +++ b/extensions/provisioning/creates.go @@ -22,8 +22,11 @@ import ( "github.com/rancher/shepherd/extensions/clusters/aks" "github.com/rancher/shepherd/extensions/clusters/eks" "github.com/rancher/shepherd/extensions/clusters/gke" - "github.com/rancher/shepherd/extensions/defaults" - "github.com/rancher/shepherd/extensions/etcdsnapshot" + "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" k3sHardening "github.com/rancher/shepherd/extensions/hardening/k3s" rke2Hardening "github.com/rancher/shepherd/extensions/hardening/rke2" "github.com/rancher/shepherd/extensions/machinepools" @@ -46,11 +49,6 @@ import ( ) const ( - active = "active" - internalIP = "alpha.kubernetes.io/provided-node-ip" - 
rke1ExternalIP = "rke.cattle.io/external-ip" - namespace = "fleet-default" - rke2k3sAirgapCustomCluster = "rke2k3sairgapcustomcluster" rke2k3sNodeCorralName = "rke2k3sregisterNode" corralPackageAirgapCustomClusterName = "airgapCustomCluster" @@ -74,7 +72,7 @@ func CreateProvisioningCluster(client *rancher.Client, provider Provider, cluste clusterName := namegen.AppendRandomString(provider.Name.String()) generatedPoolName := fmt.Sprintf("nc-%s-pool1-", clusterName) - machinePoolConfigs := provider.MachinePoolFunc(generatedPoolName, namespace) + machinePoolConfigs := provider.MachinePoolFunc(generatedPoolName, namespaces.Fleet) var machinePoolResponses []v1.SteveAPIObject @@ -97,7 +95,7 @@ func CreateProvisioningCluster(client *rancher.Client, provider Provider, cluste } secretName := fmt.Sprintf("priv-reg-sec-%s", clusterName) - secretTemplate := secrets.NewSecretTemplate(secretName, namespace, map[string][]byte{ + secretTemplate := secrets.NewSecretTemplate(secretName, namespaces.Fleet, map[string][]byte{ "password": []byte(clustersConfig.Registries.RKE2Password), "username": []byte(clustersConfig.Registries.RKE2Username), }, @@ -152,7 +150,7 @@ func CreateProvisioningCluster(client *rancher.Client, provider Provider, cluste } } - cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespace, clustersConfig, machinePools, cloudCredential.ID) + cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespaces.Fleet, clustersConfig, machinePools, cloudCredential.ID) for _, truncatedPool := range hostnameTruncation { if truncatedPool.PoolNameLengthLimit > 0 || truncatedPool.ClusterNameLengthLimit > 0 { @@ -180,8 +178,8 @@ func CreateProvisioningCluster(client *rancher.Client, provider Provider, cluste } createdCluster, err := adminClient.Steve. - SteveType(clusters.ProvisioningSteveResourceType). - ByID(namespace + "/" + clusterName) + SteveType(stevetypes.Provisioning). 
+ ByID(namespaces.Fleet + "/" + clusterName) return createdCluster, err } @@ -231,7 +229,7 @@ func CreateProvisioningCustomCluster(client *rancher.Client, externalNodeProvide clusterName := namegen.AppendRandomString(externalNodeProvider.Name) - cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespace, clustersConfig, nil, "") + cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespaces.Fleet, clustersConfig, nil, "") clusterResp, err := clusters.CreateK3SRKE2Cluster(client, cluster) if err != nil { @@ -247,7 +245,7 @@ func CreateProvisioningCustomCluster(client *rancher.Client, externalNodeProvide return nil, err } - customCluster, err := client.Steve.SteveType(etcdsnapshot.ProvisioningSteveResouceType).ByID(clusterResp.ID) + customCluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(clusterResp.ID) if err != nil { return nil, err } @@ -268,9 +266,9 @@ func CreateProvisioningCustomCluster(client *rancher.Client, externalNodeProvide return nil, err } - result, err := kubeProvisioningClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ + result, err := kubeProvisioningClient.Clusters(namespaces.Fleet).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + clusterName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) if err != nil { return nil, err @@ -343,14 +341,14 @@ func CreateProvisioningCustomCluster(client *rancher.Client, externalNodeProvide return nil, err } - hardenCluster = clusters.HardenK3SClusterConfig(clusterName, namespace, clustersConfig, nil, "") + hardenCluster = clusters.HardenK3SClusterConfig(clusterName, namespaces.Fleet, clustersConfig, nil, "") } else { err = rke2Hardening.HardenNodes(nodes, rolesPerNode) if err != nil { return nil, err } - hardenCluster = clusters.HardenRKE2ClusterConfig(clusterName, namespace, clustersConfig, nil, "") + hardenCluster = clusters.HardenRKE2ClusterConfig(clusterName, 
namespaces.Fleet, clustersConfig, nil, "") } _, err := clusters.UpdateK3SRKE2Cluster(client, clusterResp, hardenCluster) @@ -362,8 +360,8 @@ func CreateProvisioningCustomCluster(client *rancher.Client, externalNodeProvide } createdCluster, err := client.Steve. - SteveType(clusters.ProvisioningSteveResourceType). - ByID(namespace + "/" + clusterName) + SteveType(stevetypes.Provisioning). + ByID(namespaces.Fleet + "/" + clusterName) return createdCluster, err } @@ -518,7 +516,7 @@ func CreateProvisioningAirgapCustomCluster(client *rancher.Client, clustersConfi clusterName := namegen.AppendRandomString(rke2k3sAirgapCustomCluster) - cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespace, clustersConfig, nil, "") + cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespaces.Fleet, clustersConfig, nil, "") clusterResp, err := clusters.CreateK3SRKE2Cluster(client, cluster) if err != nil { @@ -530,7 +528,7 @@ func CreateProvisioningAirgapCustomCluster(client *rancher.Client, clustersConfi return nil, err } - customCluster, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterResp.ID) + customCluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(clusterResp.ID) if err != nil { return nil, err } @@ -573,7 +571,7 @@ func CreateProvisioningAirgapCustomCluster(client *rancher.Client, clustersConfi } } - createdCluster, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(namespace + "/" + clusterName) + createdCluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(namespaces.Fleet + "/" + clusterName) return createdCluster, err } @@ -839,14 +837,14 @@ func AddRKE2K3SCustomClusterNodes(client *rancher.Client, cluster *v1.SteveAPIOb logrus.Infof(output) } - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - clusterResp, err := 
client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(cluster.ID) + err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, err error) { + clusterResp, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.ID) if err != nil { return false, err } - if clusterResp.ObjectMeta.State.Name == active && - nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) == nil { + if clusterResp.ObjectMeta.State.Name == states.Active && + nodestat.AllMachineReady(client, cluster.ID, timeouts.ThirtyMinute) == nil { return true, nil } return false, nil @@ -865,27 +863,27 @@ func DeleteRKE2K3SCustomClusterNodes(client *rancher.Client, clusterID string, c return err } - nodesSteveObjList, err := steveclient.SteveType("node").List(nil) + nodesSteveObjList, err := steveclient.SteveType(stevetypes.Node).List(nil) if err != nil { return err } for _, nodeToDelete := range nodesToDelete { for _, node := range nodesSteveObjList.Data { - if node.Annotations[internalIP] == nodeToDelete.PrivateIPAddress { - machine, err := client.Steve.SteveType(machineSteveResourceType).ByID(namespace + "/" + node.Annotations[machineNameAnnotation]) + if node.Annotations[annotations.InternalIp] == nodeToDelete.PrivateIPAddress { + machine, err := client.Steve.SteveType(stevetypes.Machine).ByID(namespaces.Fleet + "/" + node.Annotations[annotations.Machine]) if err != nil { return err } logrus.Infof("Deleting node %s from cluster %s", nodeToDelete.NodeID, cluster.Name) - err = client.Steve.SteveType(machineSteveResourceType).Delete(machine) + err = client.Steve.SteveType(stevetypes.Machine).Delete(machine) if err != nil { return err } - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - _, err = client.Steve.SteveType(machineSteveResourceType).ByID(machine.ID) + err = 
kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, err error) { + _, err = client.Steve.SteveType(stevetypes.Machine).ByID(machine.ID) if err != nil { logrus.Infof("Node has successfully been deleted!") return true, nil @@ -922,7 +920,7 @@ func AddRKE1CustomClusterNodes(client *rancher.Client, cluster *management.Clust logrus.Infof(output) } - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { + err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, err error) { client, err = client.ReLogin() if err != nil { return false, err @@ -933,8 +931,8 @@ func AddRKE1CustomClusterNodes(client *rancher.Client, cluster *management.Clust return false, err } - if clusterResp.State == active && - nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) == nil { + if clusterResp.State == states.Active && + nodestat.AllManagementNodeReady(client, cluster.ID, timeouts.ThirtyMinute) == nil { return true, nil } return false, nil @@ -957,7 +955,7 @@ func DeleteRKE1CustomClusterNodes(client *rancher.Client, cluster *management.Cl for _, nodeToDelete := range nodesToDelete { for _, node := range nodes.Data { - if node.Annotations[rke1ExternalIP] == nodeToDelete.PublicIPAddress { + if node.Annotations[annotations.ExternalIp] == nodeToDelete.PublicIPAddress { machine, err := client.Management.Node.ByID(node.ID) if err != nil { return err @@ -969,7 +967,7 @@ func DeleteRKE1CustomClusterNodes(client *rancher.Client, cluster *management.Cl return err } - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { + err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, 
timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, err error) { _, err = client.Management.Node.ByID(machine.ID) if err != nil { logrus.Infof("Node has successfully been deleted!") diff --git a/extensions/provisioning/ssh.go b/extensions/provisioning/ssh.go index 12b09406..7efd9272 100644 --- a/extensions/provisioning/ssh.go +++ b/extensions/provisioning/ssh.go @@ -12,7 +12,10 @@ import ( "time" "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" extnodes "github.com/rancher/shepherd/extensions/nodes" "github.com/rancher/shepherd/extensions/provisioninginput" "github.com/rancher/shepherd/pkg/nodes" @@ -26,9 +29,6 @@ const ( checkCPU provisioninginput.SSHTestCase = "CheckCPU" checkCPUCommand = "ps -C agent -o %cpu --no-header" nodeReboot provisioninginput.SSHTestCase = "NodeReboot" - activeState = "active" - runningState = "running" - fleetNamespace = "fleet-default" ) // CallSSHTestByName tests the ssh tests specified in the provisioninginput config clusterSSHTests field. 
@@ -59,12 +59,12 @@ func CallSSHTestByName(testCase provisioninginput.SSHTestCase, node *nodes.Node, return err } // Verify machine shuts down within five minutes, shutting down should not take longer than that depending on the ami - err = wait.Poll(1*time.Second, defaults.FiveMinuteTimeout, func() (bool, error) { - newNode, err := client.Steve.SteveType(machineSteveResourceType).ByID(fleetNamespace + "/" + machineName) + err = wait.Poll(1*time.Second, timeouts.FiveMinute, func() (bool, error) { + newNode, err := client.Steve.SteveType(stevetypes.Machine).ByID(namespaces.Fleet + "/" + machineName) if err != nil { return false, err } - if newNode.State.Name == runningState { + if newNode.State.Name == states.Running { return false, nil } return true, nil @@ -74,7 +74,7 @@ func CallSSHTestByName(testCase provisioninginput.SSHTestCase, node *nodes.Node, return err } - err = extnodes.AllMachineReady(client, clusterID, defaults.TenMinuteTimeout) + err = extnodes.AllMachineReady(client, clusterID, timeouts.TenMinute) if err != nil { logrus.Errorf("Node %s failed to reboot successfully", node.PublicIPAddress) return err diff --git a/extensions/provisioning/verify.go b/extensions/provisioning/verify.go index 550bd2bc..8e839721 100644 --- a/extensions/provisioning/verify.go +++ b/extensions/provisioning/verify.go @@ -15,7 +15,10 @@ import ( steveV1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/clusters/bundledclusters" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" "github.com/rancher/shepherd/extensions/etcdsnapshot" "github.com/rancher/shepherd/extensions/kubeconfig" nodestat "github.com/rancher/shepherd/extensions/nodes" @@ 
-40,8 +43,6 @@ import ( const ( logMessageKubernetesVersion = "Validating the current version is the upgraded one" hostnameLimit = 63 - machineNameAnnotation = "cluster.x-k8s.io/machine" - machineSteveResourceType = "cluster.x-k8s.io.machine" onDemandPrefix = "on-demand-" ) @@ -55,7 +56,7 @@ func VerifyRKE1Cluster(t *testing.T, client *rancher.Client, clustersConfig *clu watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ FieldSelector: "metadata.name=" + cluster.ID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) require.NoError(t, err) @@ -69,7 +70,7 @@ func VerifyRKE1Cluster(t *testing.T, client *rancher.Client, clustersConfig *clu require.NoError(t, err) assert.NotEmpty(t, clusterToken) - err = nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) + err = nodestat.AllManagementNodeReady(client, cluster.ID, timeouts.ThirtyMinute) require.NoError(t, err) if clustersConfig.PSACT == string(provisioninginput.RancherPrivileged) || clustersConfig.PSACT == string(provisioninginput.RancherRestricted) || clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { @@ -110,9 +111,9 @@ func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *cluster kubeProvisioningClient, err := adminClient.GetKubeAPIProvisioningClient() require.NoError(t, err) - watchInterface, err := kubeProvisioningClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ + watchInterface, err := kubeProvisioningClient.Clusters(namespaces.Fleet).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + cluster.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) require.NoError(t, err) @@ -124,7 +125,7 @@ func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *cluster require.NoError(t, err) assert.NotEmpty(t, 
clusterToken) - err = nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) + err = nodestat.AllMachineReady(client, cluster.ID, timeouts.ThirtyMinute) require.NoError(t, err) status := &provv1.ClusterStatus{} @@ -180,7 +181,7 @@ func VerifyHostedCluster(t *testing.T, client *rancher.Client, cluster *manageme watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ FieldSelector: "metadata.name=" + cluster.ID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) require.NoError(t, err) @@ -193,7 +194,7 @@ func VerifyHostedCluster(t *testing.T, client *rancher.Client, cluster *manageme require.NoError(t, err) assert.NotEmpty(t, clusterToken) - err = nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) + err = nodestat.AllManagementNodeReady(client, cluster.ID, timeouts.ThirtyMinute) require.NoError(t, err) podErrors := pods.StatusPods(client, cluster.ID) @@ -210,7 +211,7 @@ func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID str watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ FieldSelector: "metadata.name=" + clusterID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) require.NoError(t, err) @@ -231,7 +232,7 @@ func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID str // VerifyDeleteRKE2K3SCluster validates that a non-rke1 cluster and its resources are deleted. 
func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID string) { - cluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(clusterID) + cluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(clusterID) require.NoError(t, err) adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) @@ -240,9 +241,9 @@ func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID provKubeClient, err := adminClient.GetKubeAPIProvisioningClient() require.NoError(t, err) - watchInterface, err := provKubeClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ + watchInterface, err := provKubeClient.Clusters(namespaces.Fleet).Watch(context.TODO(), metav1.ListOptions{ FieldSelector: "metadata.name=" + cluster.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThirtyMinute), }) require.NoError(t, err) @@ -337,7 +338,7 @@ func VerifyHostnameLength(t *testing.T, client *rancher.Client, clusterObject *s query2, err := url.ParseQuery(fmt.Sprintf("labelSelector=%s=%s", capi.MachineDeploymentNameLabel, md.Name)) require.NoError(t, err) - machineResp, err := client.Steve.SteveType(machineSteveResourceType).List(query2) + machineResp, err := client.Steve.SteveType(stevetypes.Machine).List(query2) require.NoError(t, err) assert.True(t, len(machineResp.Data) > 0) @@ -401,7 +402,7 @@ func VerifySnapshots(client *rancher.Client, localclusterID string, clusterName var snapshotToBeRestored string var snapshotNameList []string s3Prefix := onDemandPrefix + clusterName - err = kwait.PollUntilContextTimeout(context.TODO(), 5*time.Second, defaults.FiveMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { + err = kwait.PollUntilContextTimeout(context.TODO(), 5*time.Second, timeouts.FiveMinute, true, func(ctx context.Context) (done bool, err error) { if isRKE1 { snapshotObjectList, err := 
etcdsnapshot.GetRKE1Snapshots(client, clusterName) if err != nil { @@ -460,7 +461,7 @@ func VerifySSHTests(t *testing.T, client *rancher.Client, clusterObject *steveV1 steveClient, err := client.Steve.ProxyDownstream(clusterID) require.NoError(t, err) - nodesSteveObjList, err := steveClient.SteveType("node").List(nil) + nodesSteveObjList, err := steveClient.SteveType(stevetypes.Node).List(nil) require.NoError(t, err) sshUser, err := sshkeys.GetSSHUser(client, clusterObject) @@ -471,7 +472,7 @@ func VerifySSHTests(t *testing.T, client *rancher.Client, clusterObject *steveV1 clusterNode, err := sshkeys.GetSSHNodeFromMachine(client, sshUser, &machine) require.NoError(t, err) - machineName := machine.Annotations[machineNameAnnotation] + machineName := machine.Annotations[annotations.Machine] err = CallSSHTestByName(tests, clusterNode, client, clusterID, machineName) require.NoError(t, err) diff --git a/extensions/provisioninginput/config.go b/extensions/provisioninginput/config.go index 83db33d0..f1ee3d8b 100644 --- a/extensions/provisioninginput/config.go +++ b/extensions/provisioninginput/config.go @@ -12,7 +12,6 @@ type PSACT string type SSHTestCase string const ( - Namespace = "fleet-default" defaultRandStringLength = 5 ConfigurationFileKey = "provisioningInput" PSPKubeVersionLimit Version = "v1.24.99" @@ -36,101 +35,6 @@ const ( ExternalProviderName ProviderName = "external" ) -var AllRolesMachinePool = MachinePools{ - MachinePoolConfig: machinepools.MachinePoolConfig{ - NodeRoles: machinepools.NodeRoles{ - Etcd: true, - ControlPlane: true, - Worker: true, - Quantity: 1, - }, - }, -} - -var EtcdControlPlaneMachinePool = MachinePools{ - MachinePoolConfig: machinepools.MachinePoolConfig{ - NodeRoles: machinepools.NodeRoles{ - Etcd: true, - ControlPlane: true, - Quantity: 1, - }, - }, -} - -var EtcdMachinePool = MachinePools{ - MachinePoolConfig: machinepools.MachinePoolConfig{ - NodeRoles: machinepools.NodeRoles{ - Etcd: true, - Quantity: 1, - }, - }, -} - -var 
ControlPlaneMachinePool = MachinePools{ - MachinePoolConfig: machinepools.MachinePoolConfig{ - NodeRoles: machinepools.NodeRoles{ - ControlPlane: true, - Quantity: 1, - }, - }, -} - -var WorkerMachinePool = MachinePools{ - MachinePoolConfig: machinepools.MachinePoolConfig{ - NodeRoles: machinepools.NodeRoles{ - Worker: true, - Quantity: 1, - }, - }, -} - -var WindowsMachinePool = MachinePools{ - MachinePoolConfig: machinepools.MachinePoolConfig{ - NodeRoles: machinepools.NodeRoles{ - Windows: true, - Quantity: 1, - }, - }, -} - -var AllRolesNodePool = NodePools{ - NodeRoles: nodepools.NodeRoles{ - Etcd: true, - ControlPlane: true, - Worker: true, - Quantity: 1, - }, -} - -var EtcdControlPlaneNodePool = NodePools{ - NodeRoles: nodepools.NodeRoles{ - Etcd: true, - ControlPlane: true, - Quantity: 1, - }, -} - -var EtcdNodePool = NodePools{ - NodeRoles: nodepools.NodeRoles{ - Etcd: true, - Quantity: 1, - }, -} - -var ControlPlaneNodePool = NodePools{ - NodeRoles: nodepools.NodeRoles{ - ControlPlane: true, - Quantity: 1, - }, -} - -var WorkerNodePool = NodePools{ - NodeRoles: nodepools.NodeRoles{ - Worker: true, - Quantity: 1, - }, -} - // String stringer for the ProviderName func (p ProviderName) String() string { return string(p) @@ -187,7 +91,7 @@ type Registries struct { type MachinePools struct { machinepools.Pools - MachinePoolConfig machinepools.MachinePoolConfig `json:"machinePoolConfig,omitempty" yaml:"machinePoolConfig,omitempty" default:"[]"` + MachinePoolConfig machinepools.MachinePoolConfig `json:"machinePoolConfig,omitempty" yaml:"machinePoolConfig,omitempty" default:"{}"` IsSecure bool `json:"isSecure,omitempty" yaml:"isSecure,omitempty" default:"false"` } diff --git a/extensions/provisioninginput/machinepools.yml b/extensions/provisioninginput/machinepools.yml new file mode 100644 index 00000000..058e44e2 --- /dev/null +++ b/extensions/provisioninginput/machinepools.yml @@ -0,0 +1,38 @@ +etcdControlPlaneWorker: + machinePools: + machinePoolConfig: + 
etcd: true + controlplane: true + worker: true + quantity: 1 + +etcdControlPlane: + machinePools: + machinePoolConfig: + etcd: true + controlplane: true + quantity: 1 + +etcd: + machinePools: + machinePoolConfig: + etcd: true + quantity: 1 + +controlPlane: + machinePools: + machinePoolConfig: + controlplane: true + quantity: 1 + +worker: + machinePools: + machinePoolConfig: + worker: true + quantity: 1 + +windows: + machinePools: + machinePoolConfig: + windows: true + quantity: 1 \ No newline at end of file diff --git a/extensions/provisioninginput/nodepools.yml b/extensions/provisioninginput/nodepools.yml new file mode 100644 index 00000000..d59c638c --- /dev/null +++ b/extensions/provisioninginput/nodepools.yml @@ -0,0 +1,38 @@ +etcdControlPlaneWorker: + nodePools: + nodeRoles: + etcd: true + controlplane: true + worker: true + quantity: 1 + +etcdControlPlane: + nodePools: + nodeRoles: + etcd: true + controlplane: true + quantity: 1 + +etcd: + nodePools: + nodeRoles: + etcd: true + quantity: 1 + +controlPlane: + nodePools: + nodeRoles: + controlplane: true + quantity: 1 + +worker: + nodePools: + nodeRoles: + worker: true + quantity: 1 + +windows: + nodePools: + nodeRoles: + windows: true + quantity: 1 \ No newline at end of file diff --git a/extensions/provisioninginput/pools.go b/extensions/provisioninginput/pools.go new file mode 100644 index 00000000..f7642028 --- /dev/null +++ b/extensions/provisioninginput/pools.go @@ -0,0 +1,41 @@ +package provisioninginput + +import ( + "path" + "runtime" + + "github.com/rancher/shepherd/extensions/defaults" +) + +const ( + machinePoolsFile = "machinepools.yml" + nodePoolsFile = "nodepools.yml" +) + +func GetMachinePoolConfigs(machinePoolConfigNames []string) []MachinePools { + machinePoolConfigs := []MachinePools{} + _, filename, _, _ := runtime.Caller(0) + file := path.Join(path.Dir(filename), machinePoolsFile) + + for _, name := range machinePoolConfigNames { + myconfig := new(MachinePools) + defaults.LoadDefault(file, 
name, myconfig) + machinePoolConfigs = append(machinePoolConfigs, *myconfig) + } + + return machinePoolConfigs +} + +func GetNodePoolConfigs(nodePoolConfigNames []string) []NodePools { + nodePoolConfigs := []NodePools{} + _, filename, _, _ := runtime.Caller(0) + file := path.Join(path.Dir(filename), nodePoolsFile) + + for _, name := range nodePoolConfigNames { + myconfig := new(NodePools) + defaults.LoadDefault(file, name, myconfig) + nodePoolConfigs = append(nodePoolConfigs, *myconfig) + } + + return nodePoolConfigs +} diff --git a/extensions/psact/createdeployment.go b/extensions/psact/createdeployment.go index e22fe7f6..04b80af9 100644 --- a/extensions/psact/createdeployment.go +++ b/extensions/psact/createdeployment.go @@ -7,6 +7,8 @@ import ( "github.com/rancher/shepherd/clients/rancher" steveV1 "github.com/rancher/shepherd/clients/rancher/v1" + defaultlabels "github.com/rancher/shepherd/extensions/defaults/labels" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/provisioninginput" "github.com/rancher/shepherd/extensions/workloads" namegenerator "github.com/rancher/shepherd/pkg/namegenerator" @@ -29,7 +31,7 @@ const ( // deployment should successfully create. If the PSACT value is rancher-unprivileged, then the deployment should fail to create. 
func CreateNginxDeployment(client *rancher.Client, clusterID string, psact string) error { labels := map[string]string{} - labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.deployment-%v-%v", namespace, workload) + labels[defaultlabels.WorkloadSelector] = fmt.Sprintf("apps.deployment-%v-%v", namespace, workload) containerTemplate := workloads.NewContainer(containerName, imageName, v1.PullAlways, []v1.VolumeMount{}, []v1.EnvFromSource{}, nil, nil, nil) podTemplate := workloads.NewPodTemplate([]v1.Container{containerTemplate}, []v1.Volume{}, []v1.LocalObjectReference{}, labels) @@ -41,12 +43,12 @@ func CreateNginxDeployment(client *rancher.Client, clusterID string, psact strin } // If the deployment already exists, then create a new deployment with a different name to avoid a naming conflict. - if _, err := steveclient.SteveType(workloads.DeploymentSteveType).ByID(deploymentTemplate.Namespace + "/" + deploymentTemplate.Name); err == nil { + if _, err := steveclient.SteveType(stevetypes.Deployment).ByID(deploymentTemplate.Namespace + "/" + deploymentTemplate.Name); err == nil { deploymentTemplate.Name = deploymentTemplate.Name + "-" + namegenerator.RandStringLower(5) } logrus.Infof("Creating deployment %s", deploymentTemplate.Name) - _, err = steveclient.SteveType(workloads.DeploymentSteveType).Create(deploymentTemplate) + _, err = steveclient.SteveType(stevetypes.Deployment).Create(deploymentTemplate) if err != nil { return err } @@ -57,7 +59,7 @@ func CreateNginxDeployment(client *rancher.Client, clusterID string, psact strin return false, err } - deploymentResp, err := steveclient.SteveType(workloads.DeploymentSteveType).ByID(deploymentTemplate.Namespace + "/" + deploymentTemplate.Name) + deploymentResp, err := steveclient.SteveType(stevetypes.Deployment).ByID(deploymentTemplate.Namespace + "/" + deploymentTemplate.Name) if err != nil { // We don't want to return the error so we don't exit the poll too soon. 
// There could be delay of when the deployment is created. @@ -88,13 +90,13 @@ func CreateNginxDeployment(client *rancher.Client, clusterID string, psact strin return err } - deploymentResp, err := steveclient.SteveType(workloads.DeploymentSteveType).ByID(deploymentTemplate.Namespace + "/" + deploymentTemplate.Name) + deploymentResp, err := steveclient.SteveType(stevetypes.Deployment).ByID(deploymentTemplate.Namespace + "/" + deploymentTemplate.Name) if err != nil { return err } logrus.Infof("Deleting deployment %s", deploymentResp.Name) - err = steveclient.SteveType(workloads.DeploymentSteveType).Delete(deploymentResp) + err = steveclient.SteveType(stevetypes.Deployment).Delete(deploymentResp) if err != nil { return err } diff --git a/extensions/rancherleader/rancherleader.go b/extensions/rancherleader/rancherleader.go index 8aebc1b3..db2b4323 100644 --- a/extensions/rancherleader/rancherleader.go +++ b/extensions/rancherleader/rancherleader.go @@ -5,6 +5,8 @@ import ( "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/namespaces" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" coordinationv1 "k8s.io/api/coordination/v1" ) @@ -17,7 +19,7 @@ const ( // GetRancherLeaderPodName is a helper function to retrieve the name of the rancher leader pod func GetRancherLeaderPodName(client *rancher.Client) (string, error) { query := url.Values{"fieldSelector": {"metadata.name=" + LeaseName}} - lease, err := client.Steve.SteveType(LeaseSteveType).NamespacedSteveClient(KubeSystemNamespace).List(query) + lease, err := client.Steve.SteveType(stevetypes.CoordinationLease).NamespacedSteveClient(namespaces.KubeSystem).List(query) if err != nil { return "", err } diff --git a/extensions/rbac/verify.go b/extensions/rbac/verify.go index 6d42b603..1c03e5c4 100644 --- a/extensions/rbac/verify.go +++ b/extensions/rbac/verify.go @@ -11,7 +11,8 @@ import ( 
"github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/namespaces" "github.com/rancher/shepherd/extensions/projects" "github.com/rancher/shepherd/extensions/users" @@ -32,7 +33,6 @@ const ( roleProjectReadOnly = "read-only" restrictedAdmin = "restricted-admin" standardUser = "user" - activeStatus = "active" forbiddenError = "403 Forbidden" ) @@ -41,14 +41,14 @@ var rgx = regexp.MustCompile(`\[(.*?)\]`) // VerifyGlobalRoleBindingsForUser validates that a global role bindings is created for a user when the user is created func VerifyGlobalRoleBindingsForUser(t *testing.T, user *management.User, adminClient *rancher.Client) { query := url.Values{"filter": {"userName=" + user.ID}} - grbs, err := adminClient.Steve.SteveType("management.cattle.io.globalrolebinding").List(query) + grbs, err := adminClient.Steve.SteveType(stevetypes.GlobalRoleBinding).List(query) require.NoError(t, err) assert.Equal(t, 1, len(grbs.Data)) } // VerifyUserCanListCluster validates a user with the required global permissions are able to/not able to list the clusters in rancher server func VerifyUserCanListCluster(t *testing.T, client, standardClient *rancher.Client, clusterID, role string) { - clusterList, err := standardClient.Steve.SteveType(clusters.ProvisioningSteveResourceType).ListAll(nil) + clusterList, err := standardClient.Steve.SteveType(stevetypes.Provisioning).ListAll(nil) require.NoError(t, err) clusterStatus := &apiV1.ClusterStatus{} @@ -56,7 +56,7 @@ func VerifyUserCanListCluster(t *testing.T, client, standardClient *rancher.Clie require.NoError(t, err) if role == restrictedAdmin { - adminClusterList, err := 
client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ListAll(nil) + adminClusterList, err := client.Steve.SteveType(stevetypes.Provisioning).ListAll(nil) require.NoError(t, err) assert.Equal(t, (len(adminClusterList.Data) - 1), len(clusterList.Data)) } @@ -93,7 +93,7 @@ func VerifyUserCanCreateProjects(t *testing.T, client, standardClient *rancher.C require.NoError(t, err) log.Info("Created project as a ", role, " is ", memberProject.Name) actualStatus := fmt.Sprintf("%v", memberProject.State) - assert.Equal(t, activeStatus, strings.ToLower(actualStatus)) + assert.Equal(t, states.Active, strings.ToLower(actualStatus)) case roleProjectOwner, roleProjectMember: require.Error(t, err) errStatus := strings.Split(err.Error(), ".")[1] @@ -121,7 +121,7 @@ func VerifyUserCanCreateNamespace(t *testing.T, client, standardClient *rancher. err = v1.ConvertToK8sType(createdNamespace.Status, namespaceStatus) require.NoError(t, err) actualStatus := fmt.Sprintf("%v", namespaceStatus.Phase) - assert.Equal(t, activeStatus, strings.ToLower(actualStatus)) + assert.Equal(t, states.Active, strings.ToLower(actualStatus)) case roleMember: require.Error(t, checkErr) errStatus := strings.Split(checkErr.Error(), ".")[1] @@ -139,11 +139,11 @@ func VerifyUserCanListNamespace(t *testing.T, client, standardClient *rancher.Cl steveStandardClient, err := standardClient.Steve.ProxyDownstream(clusterID) require.NoError(t, err) - namespaceListAdmin, err := steveAdminClient.SteveType(namespaces.NamespaceSteveType).List(nil) + namespaceListAdmin, err := steveAdminClient.SteveType(stevetypes.Namespace).List(nil) require.NoError(t, err) sortedNamespaceListAdmin := namespaceListAdmin.Names() - namespaceListNonAdmin, err := steveStandardClient.SteveType(namespaces.NamespaceSteveType).List(nil) + namespaceListNonAdmin, err := steveStandardClient.SteveType(stevetypes.Namespace).List(nil) require.NoError(t, err) sortedNamespaceListNonAdmin := namespaceListNonAdmin.Names() @@ -175,9 +175,9 @@ func 
VerifyUserCanDeleteNamespace(t *testing.T, client, standardClient *rancher. adminNamespace, err := namespaces.CreateNamespace(client, namespaceName+"-admin", "{}", map[string]string{}, map[string]string{}, project) require.NoError(t, err) - namespaceID, err := steveAdminClient.SteveType(namespaces.NamespaceSteveType).ByID(adminNamespace.ID) + namespaceID, err := steveAdminClient.SteveType(stevetypes.Namespace).ByID(adminNamespace.ID) require.NoError(t, err) - err = steveStandardClient.SteveType(namespaces.NamespaceSteveType).Delete(namespaceID) + err = steveStandardClient.SteveType(stevetypes.Namespace).Delete(namespaceID) switch role { case roleOwner, roleProjectOwner, roleProjectMember, restrictedAdmin: diff --git a/extensions/registries/registries.go b/extensions/registries/registries.go index 07543769..a0fca48b 100644 --- a/extensions/registries/registries.go +++ b/extensions/registries/registries.go @@ -7,6 +7,7 @@ import ( "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/workloads/pods" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" @@ -20,7 +21,7 @@ func CheckAllClusterPodsForRegistryPrefix(client *rancher.Client, clusterID, reg return false, err } - steveClient := downstreamClient.SteveType(pods.PodResourceSteveType) + steveClient := downstreamClient.SteveType(stevetypes.Pod) podsList, err := steveClient.List(nil) if err != nil { return false, err diff --git a/extensions/resourcequotas/resourcequotas.go b/extensions/resourcequotas/resourcequotas.go index 03064631..7e71e2da 100644 --- a/extensions/resourcequotas/resourcequotas.go +++ b/extensions/resourcequotas/resourcequotas.go @@ -4,20 +4,18 @@ import ( "time" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/states" + 
"github.com/rancher/shepherd/extensions/defaults/stevetypes" kwait "k8s.io/apimachinery/pkg/util/wait" ) -const ( - ResourceQuotasSteveType = "resourcequota" -) - // CheckResourceActiveState is a function that uses the Steve API to check if the resource quota is in an active state func CheckResourceActiveState(client *rancher.Client, resourceQuotaID string) error { return kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) { - steveResourceQuota, err := client.Steve.SteveType(ResourceQuotasSteveType).ByID(resourceQuotaID) + steveResourceQuota, err := client.Steve.SteveType(stevetypes.ResourceQuota).ByID(resourceQuotaID) if err != nil { return false, err - } else if steveResourceQuota.State.Name == "active" { + } else if steveResourceQuota.State.Name == states.Active { return true, nil } diff --git a/extensions/rke1/componentchecks/etcdversion.go b/extensions/rke1/componentchecks/etcdversion.go index 3c9cdfb0..57085c7c 100644 --- a/extensions/rke1/componentchecks/etcdversion.go +++ b/extensions/rke1/componentchecks/etcdversion.go @@ -4,6 +4,9 @@ import ( "strings" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/labels" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/pkg/nodes" "github.com/sirupsen/logrus" ) @@ -15,7 +18,7 @@ func CheckETCDVersion(client *rancher.Client, nodes []*nodes.Node, clusterID str return nil, err } - nodesList, err := steveClient.SteveType("node").List(nil) + nodesList, err := steveClient.SteveType(stevetypes.Node).List(nil) if err != nil { return nil, err } @@ -23,8 +26,8 @@ func CheckETCDVersion(client *rancher.Client, nodes []*nodes.Node, clusterID str var etcdResult []string for _, rancherNode := range nodesList.Data { - externalIP := rancherNode.Annotations["rke.cattle.io/external-ip"] - etcdRole := rancherNode.Labels["node-role.kubernetes.io/etcd"] == 
"true" + externalIP := rancherNode.Annotations[annotations.ExternalIp] + etcdRole := rancherNode.Labels[labels.EtcdRole] == "true" if etcdRole == true { for _, node := range nodes { diff --git a/extensions/rke1/nodepools/nodepools.go b/extensions/rke1/nodepools/nodepools.go index 94f2c2a6..333d4273 100644 --- a/extensions/rke1/nodepools/nodepools.go +++ b/extensions/rke1/nodepools/nodepools.go @@ -8,16 +8,13 @@ import ( "github.com/rancher/norman/types" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/states" + "github.com/rancher/shepherd/extensions/defaults/timeouts" nodestat "github.com/rancher/shepherd/extensions/nodes" "github.com/sirupsen/logrus" kwait "k8s.io/apimachinery/pkg/util/wait" ) -const ( - active = "active" -) - type NodeRoles struct { ControlPlane bool `json:"controlplane,omitempty" yaml:"controlplane,omitempty"` Etcd bool `json:"etcd,omitempty" yaml:"etcd,omitempty"` @@ -111,13 +108,13 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, return nil, err } - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { + err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, timeouts.ThirtyMinute, true, func(ctx context.Context) (done bool, err error) { clusterResp, err := client.Management.Cluster.ByID(cluster.ID) if err != nil { return false, err } - if clusterResp.State == active && nodestat.AllManagementNodeReady(client, clusterResp.ID, defaults.ThirtyMinuteTimeout) == nil { + if clusterResp.State == states.Active && nodestat.AllManagementNodeReady(client, clusterResp.ID, timeouts.ThirtyMinute) == nil { return true, nil } return false, nil diff --git a/extensions/serviceaccounts/serviceaccounts.go 
b/extensions/serviceaccounts/serviceaccounts.go index 342e44c7..bf31064b 100644 --- a/extensions/serviceaccounts/serviceaccounts.go +++ b/extensions/serviceaccounts/serviceaccounts.go @@ -6,6 +6,7 @@ import ( "time" "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/defaults/states" kwait "k8s.io/apimachinery/pkg/util/wait" ) @@ -27,7 +28,7 @@ func IsServiceAccountReady(rancherClient *rancher.Client, clusterID, namespace, return false, nil } return false, err - } else if serviceAccount.State.Name == "active" { + } else if serviceAccount.State.Name == states.Active { return true, nil } diff --git a/extensions/services/verify.go b/extensions/services/verify.go index 8736bd38..cdd2eec2 100644 --- a/extensions/services/verify.go +++ b/extensions/services/verify.go @@ -7,6 +7,7 @@ import ( "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/extensions/ingresses" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -27,7 +28,7 @@ func VerifyAWSLoadBalancer(t *testing.T, client *rancher.Client, serviceLB *v1.S lbHostname := "" err = kwait.Poll(5*time.Second, 1*time.Minute, func() (done bool, err error) { - updateService, err := steveclient.SteveType("service").ByID(serviceLB.ID) + updateService, err := steveclient.SteveType(stevetypes.Service).ByID(serviceLB.ID) if err != nil { return false, nil } diff --git a/extensions/settings/shell.go b/extensions/settings/shell.go index d0921243..b9c99e4b 100644 --- a/extensions/settings/shell.go +++ b/extensions/settings/shell.go @@ -4,6 +4,7 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" ) // ShellVersion is a helper that gets the shell setting json based 
on the ID and return the shell image value. @@ -14,7 +15,7 @@ func ShellVersion(client *rancher.Client, clusterID, resourceName string) (strin } shellSetting := &v3.Setting{} - shellSettingResp, err := steveClient.SteveType("management.cattle.io.setting").ByID("shell-image") + shellSettingResp, err := steveClient.SteveType(stevetypes.ManagementSetting).ByID("shell-image") if err != nil { return "", err } diff --git a/extensions/sshkeys/downloadsshkeys.go b/extensions/sshkeys/downloadsshkeys.go index 89255a34..8e6a8015 100644 --- a/extensions/sshkeys/downloadsshkeys.go +++ b/extensions/sshkeys/downloadsshkeys.go @@ -11,24 +11,23 @@ import ( "github.com/rancher/shepherd/clients/rancher" steveV1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults/annotations" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" kubeapinodes "github.com/rancher/shepherd/extensions/kubeapi/nodes" "github.com/rancher/shepherd/pkg/nodes" corev1 "k8s.io/api/core/v1" ) const ( - privateKeySSHKeyRegExPattern = `-----BEGIN RSA PRIVATE KEY-{3,}\n([\s\S]*?)\n-{3,}END RSA PRIVATE KEY-----` - ClusterMachineConstraintResourceSteveType = "cluster.x-k8s.io.machine" - ClusterMachineAnnotation = "cluster.x-k8s.io/machine" - - rootUser = "root" + privateKeySSHKeyRegExPattern = `-----BEGIN RSA PRIVATE KEY-{3,}\n([\s\S]*?)\n-{3,}END RSA PRIVATE KEY-----` + rootUser = "root" ) // DownloadSSHKeys is a helper function that takes a client, the machinePoolNodeName to download // the ssh key for a particular node. 
func DownloadSSHKeys(client *rancher.Client, machinePoolNodeName string) ([]byte, error) { machinePoolNodeNameName := fmt.Sprintf("fleet-default/%s", machinePoolNodeName) - machine, err := client.Steve.SteveType(ClusterMachineConstraintResourceSteveType).ByID(machinePoolNodeNameName) + machine, err := client.Steve.SteveType(stevetypes.Machine).ByID(machinePoolNodeNameName) if err != nil { return nil, err } @@ -61,7 +60,7 @@ func DownloadSSHKeys(client *rancher.Client, machinePoolNodeName string) ([]byte // GetSSHNodeFromMachine returns the v1/node object given a steve/v1/machine object. func GetSSHNodeFromMachine(client *rancher.Client, sshUser string, machine *steveV1.SteveAPIObject) (*nodes.Node, error) { - machineName := machine.Annotations[ClusterMachineAnnotation] + machineName := machine.Annotations[annotations.Machine] sshkey, err := DownloadSSHKeys(client, machineName) if err != nil { return nil, err diff --git a/extensions/users/users.go b/extensions/users/users.go index d6880391..440ddd11 100644 --- a/extensions/users/users.go +++ b/extensions/users/users.go @@ -9,6 +9,8 @@ import ( v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" "github.com/rancher/shepherd/clients/rancher" management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + defaultLabels "github.com/rancher/shepherd/extensions/defaults/labels" + "github.com/rancher/shepherd/extensions/defaults/timeouts" extauthz "github.com/rancher/shepherd/extensions/kubeapi/authorization" "github.com/rancher/shepherd/extensions/kubeapi/rbac" password "github.com/rancher/shepherd/extensions/users/passwordgenerator" @@ -25,12 +27,6 @@ import ( "k8s.io/apimachinery/pkg/watch" ) -const ( - rtbOwnerLabel = "authz.cluster.cattle.io/rtb-owner-updated" -) - -var timeout = int64(60 * 3) - // UserConfig sets and returns username and password of the user func UserConfig() (user *management.User) { enabled := true @@ -91,7 +87,7 @@ func AddProjectMember(rancherClient 
*rancher.Client, project *management.Project opts := metav1.ListOptions{ FieldSelector: "metadata.name=" + name, - TimeoutSeconds: &timeout, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThreeMinute), } watchInterface, err := adminClient.GetManagementWatchInterface(management.ProjectType, opts) if err != nil { @@ -197,7 +193,7 @@ func AddClusterRoleToUser(rancherClient *rancher.Client, cluster *management.Clu opts := metav1.ListOptions{ FieldSelector: "metadata.name=" + cluster.ID, - TimeoutSeconds: &timeout, + TimeoutSeconds: timeouts.WatchTimeout(timeouts.ThreeMinute), } watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, opts) if err != nil { @@ -318,7 +314,7 @@ const ( func waitForCRTBRollout(client *rancher.Client, crtb *management.ClusterRoleTemplateBinding, opType operationType) error { crtbNamespace, crtbName := ref.Parse(crtb.ID) - req, err := labels.NewRequirement(rtbOwnerLabel, selection.In, []string{fmt.Sprintf("%s_%s", crtbNamespace, crtbName)}) + req, err := labels.NewRequirement(defaultLabels.RtbOwnerUpdated, selection.In, []string{fmt.Sprintf("%s_%s", crtbNamespace, crtbName)}) if err != nil { return fmt.Errorf("unable to form label requirement for %s/%s: %w", crtbNamespace, crtbName, err) } diff --git a/extensions/workloads/create.go b/extensions/workloads/create.go index 37138e08..f5eb9a30 100644 --- a/extensions/workloads/create.go +++ b/extensions/workloads/create.go @@ -2,6 +2,7 @@ package workloads import ( steveV1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/sirupsen/logrus" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -16,7 +17,7 @@ const ( // CreateDeploymentWithService is a helper function to create a deployment and service in the downstream cluster. 
func CreateDeploymentWithService(steveclient *steveV1.Client, wlName string, deployment *v1.Deployment, service corev1.Service) (*steveV1.SteveAPIObject, *steveV1.SteveAPIObject, error) { logrus.Infof("Creating deployment: %s", wlName) - deploymentResp, err := steveclient.SteveType(DeploymentSteveType).Create(deployment) + deploymentResp, err := steveclient.SteveType(stevetypes.Deployment).Create(deployment) if err != nil { logrus.Errorf("Failed to create deployment: %s", wlName) return nil, nil, err @@ -25,7 +26,7 @@ func CreateDeploymentWithService(steveclient *steveV1.Client, wlName string, dep logrus.Infof("Successfully created deployment: %s", wlName) logrus.Infof("Creating service: %s", service.Name) - serviceResp, err := steveclient.SteveType(ServiceType).Create(service) + serviceResp, err := steveclient.SteveType(stevetypes.Service).Create(service) if err != nil { logrus.Errorf("Failed to create service: %s", service.Name) return nil, nil, err diff --git a/extensions/workloads/pods/pod_status.go b/extensions/workloads/pods/pod_status.go index aa1934ff..70336818 100644 --- a/extensions/workloads/pods/pod_status.go +++ b/extensions/workloads/pods/pod_status.go @@ -6,7 +6,8 @@ import ( "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/defaults" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/defaults/timeouts" corev1 "k8s.io/api/core/v1" kwait "k8s.io/apimachinery/pkg/util/wait" ) @@ -25,8 +26,8 @@ func StatusPods(client *rancher.Client, clusterID string) []error { var podErrors []error - steveClient := downstreamClient.SteveType(PodResourceSteveType) - err = kwait.Poll(5*time.Second, defaults.FifteenMinuteTimeout, func() (done bool, err error) { + steveClient := downstreamClient.SteveType(stevetypes.Pod) + err = kwait.Poll(5*time.Second, timeouts.FifteenMinute, func() (done bool, err error) { // emptying pod errors 
every time we poll so that we don't return stale errors podErrors = []error{} diff --git a/extensions/workloads/pods/verify.go b/extensions/workloads/pods/verify.go index ba40ee6d..84dddd3c 100644 --- a/extensions/workloads/pods/verify.go +++ b/extensions/workloads/pods/verify.go @@ -7,6 +7,7 @@ import ( provv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appv1 "k8s.io/api/apps/v1" @@ -27,7 +28,7 @@ func VerifyReadyDaemonsetPods(t *testing.T, client *rancher.Client, cluster *v1. daemonsetequals := false err = wait.Poll(500*time.Millisecond, 5*time.Minute, func() (dameonsetequals bool, err error) { - daemonsets, err := client.Steve.SteveType(DaemonsetSteveType).ByID(status.ClusterName) + daemonsets, err := client.Steve.SteveType(stevetypes.Daemonset).ByID(status.ClusterName) require.NoError(t, err) daemonsetsStatusType := &appv1.DaemonSetStatus{} @@ -41,7 +42,7 @@ func VerifyReadyDaemonsetPods(t *testing.T, client *rancher.Client, cluster *v1. 
}) require.NoError(t, err) - daemonsets, err := client.Steve.SteveType(DaemonsetSteveType).ByID(status.ClusterName) + daemonsets, err := client.Steve.SteveType(stevetypes.Daemonset).ByID(status.ClusterName) require.NoError(t, err) daemonsetsStatusType := &appv1.DaemonSetStatus{} diff --git a/extensions/workloads/template.go b/extensions/workloads/template.go index c51c9d29..91962a1f 100644 --- a/extensions/workloads/template.go +++ b/extensions/workloads/template.go @@ -3,6 +3,7 @@ package workloads import ( "fmt" + "github.com/rancher/shepherd/extensions/defaults/labels" appv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -48,8 +49,8 @@ func NewDeploymentTemplate(deploymentName string, namespace string, template cor } if isCattleLabeled { - matchLabels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.deployment-%v-%v", namespace, deploymentName) - template.ObjectMeta.Labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.deployment-%v-%v", namespace, deploymentName) + matchLabels[labels.WorkloadSelector] = fmt.Sprintf("apps.deployment-%v-%v", namespace, deploymentName) + template.ObjectMeta.Labels[labels.WorkloadSelector] = fmt.Sprintf("apps.deployment-%v-%v", namespace, deploymentName) } return &appv1.Deployment{ @@ -74,8 +75,8 @@ func NewDaemonSetTemplate(daemonsetName string, namespace string, template corev } if isCattleLabeled { - matchLabels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.daemonset-%v-%v", namespace, daemonsetName) - template.ObjectMeta.Labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.daemonset-%v-%v", namespace, daemonsetName) + matchLabels[labels.WorkloadSelector] = fmt.Sprintf("apps.daemonset-%v-%v", namespace, daemonsetName) + template.ObjectMeta.Labels[labels.WorkloadSelector] = fmt.Sprintf("apps.daemonset-%v-%v", namespace, daemonsetName) } return &appv1.DaemonSet{ diff --git a/extensions/workloads/verify.go 
b/extensions/workloads/verify.go index 937fdf33..365f462b 100644 --- a/extensions/workloads/verify.go +++ b/extensions/workloads/verify.go @@ -4,6 +4,7 @@ import ( "time" steveV1 "github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" appv1 "k8s.io/api/apps/v1" kwait "k8s.io/apimachinery/pkg/util/wait" ) @@ -14,7 +15,7 @@ func VerifyDeployment(steveClient *steveV1.Client, deployment *steveV1.SteveAPIO if err != nil { return false, nil } - deploymentResp, err := steveClient.SteveType(DeploymentSteveType).ByID(deployment.Namespace + "/" + deployment.Name) + deploymentResp, err := steveClient.SteveType(stevetypes.Deployment).ByID(deployment.Namespace + "/" + deployment.Name) if err != nil { return false, nil } diff --git a/go.mod b/go.mod index eab120b6..d4e1d0cb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/rancher/shepherd -go 1.22 +go 1.22.0 replace ( k8s.io/api => k8s.io/api v0.28.6 diff --git a/pkg/codegen/generator/generator.go b/pkg/codegen/generator/generator.go index 19222f07..105a1e31 100644 --- a/pkg/codegen/generator/generator.go +++ b/pkg/codegen/generator/generator.go @@ -8,7 +8,7 @@ import ( "github.com/rancher/norman/types" ) -var ( +const ( outputDir = "./" baseCattle = "clients/rancher/generated" ) diff --git a/pkg/codegen/main.go b/pkg/codegen/main.go index fefdd0fc..9f420124 100644 --- a/pkg/codegen/main.go +++ b/pkg/codegen/main.go @@ -12,6 +12,7 @@ import ( managementv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" provisioningv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" + "github.com/rancher/shepherd/extensions/defaults/stevetypes" "github.com/rancher/shepherd/pkg/codegen/generator" managementSchema "github.com/rancher/shepherd/pkg/schemas/management.cattle.io/v3" planv1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1" @@ -95,7 +96,7 @@ func 
main() { clusterAPIVersion := &types.APIVersion{Group: capi.GroupVersion.Group, Version: capi.GroupVersion.Version, Path: "/v1"} generator.GenerateClient(factory.Schemas(clusterAPIVersion).Init(func(schemas *types.Schemas) *types.Schemas { return schemas.MustImportAndCustomize(clusterAPIVersion, capi.Machine{}, func(schema *types.Schema) { - schema.ID = "cluster.x-k8s.io.machine" + schema.ID = stevetypes.Machine }) }), nil) diff --git a/pkg/ref/parse.go b/pkg/ref/parse.go index 25500ee0..717e2cc6 100644 --- a/pkg/ref/parse.go +++ b/pkg/ref/parse.go @@ -8,7 +8,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -var NodeNotFound = "can not build dialer to" +const NodeNotFound = "can not build dialer to" func IsNodeNotFound(err error) bool { if err == nil { diff --git a/pkg/schemas/mapper/cross_version_object.go b/pkg/schemas/mapper/cross_version_object.go index b4fe4bf1..93301877 100644 --- a/pkg/schemas/mapper/cross_version_object.go +++ b/pkg/schemas/mapper/cross_version_object.go @@ -10,27 +10,6 @@ import ( "github.com/rancher/norman/types/convert" ) -var ( - kindMap = map[string]string{ - "deployment": "Deployment", - "replicationcontroller": "ReplicationController", - "statefulset": "StatefulSet", - "daemonset": "DaemonSet", - "job": "Job", - "cronjob": "CronJob", - "replicaset": "ReplicaSet", - } - groupVersionMap = map[string]string{ - "deployment": "apps/v1beta2", - "replicationcontroller": "core/v1", - "statefulset": "apps/v1beta2", - "daemonset": "apps/v1beta2", - "job": "batch/v1", - "cronjob": "batch/v1beta1", - "replicaset": "apps/v1beta2", - } -) - type CrossVersionObjectToWorkload struct { Field string } @@ -40,6 +19,16 @@ func (c CrossVersionObjectToWorkload) ToInternal(data map[string]interface{}) er if !ok { return nil } + + groupVersionMap := map[string]string{ + "deployment": "apps/v1beta2", + "replicationcontroller": "core/v1", + "statefulset": "apps/v1beta2", + "daemonset": "apps/v1beta2", + "job": "batch/v1", + "cronjob": "batch/v1beta1", + 
"replicaset": "apps/v1beta2", + } workloadID := convert.ToString(obj) parts := strings.SplitN(workloadID, ":", 3) newObj := map[string]interface{}{ @@ -72,5 +61,14 @@ func (c CrossVersionObjectToWorkload) ModifySchema(schema *types.Schema, schemas } func getKind(i string) string { + kindMap := map[string]string{ + "deployment": "Deployment", + "replicationcontroller": "ReplicationController", + "statefulset": "StatefulSet", + "daemonset": "DaemonSet", + "job": "Job", + "cronjob": "CronJob", + "replicaset": "ReplicaSet", + } return kindMap[i] } diff --git a/pkg/schemas/mapper/node_address.go b/pkg/schemas/mapper/node_address.go index 4f1794ad..9b2d0e7b 100644 --- a/pkg/schemas/mapper/node_address.go +++ b/pkg/schemas/mapper/node_address.go @@ -3,6 +3,7 @@ package mapper import ( "github.com/rancher/norman/types" "github.com/rancher/norman/types/values" + "github.com/rancher/shepherd/extensions/defaults/annotations" ) const ( @@ -39,7 +40,7 @@ type NodeAddressAnnotationMapper struct { } func (n NodeAddressAnnotationMapper) FromInternal(data map[string]interface{}) { - externalIP, ok := values.GetValue(data, "status", "nodeAnnotations", "rke.cattle.io/external-ip") + externalIP, ok := values.GetValue(data, "status", "nodeAnnotations", annotations.ExternalIp) if ok { data[extIPField] = externalIP }