diff --git a/clients/dynamic/dynamic.go b/clients/dynamic/dynamic.go index b46ec3ec..f2a6f1a5 100644 --- a/clients/dynamic/dynamic.go +++ b/clients/dynamic/dynamic.go @@ -23,7 +23,7 @@ type Client struct { } // NewForConfig creates a new dynamic client or returns an error. -func NewForConfig(ts *session.Session, inConfig *rest.Config) (dynamic.Interface, error) { +func NewForConfig(ts *session.Session, inConfig *rest.Config) (*Client, error) { logrus.Debugf("Dynamic Client Host:%s", inConfig.Host) dynamicClient, err := dynamic.NewForConfig(inConfig) @@ -44,7 +44,7 @@ func NewForConfig(ts *session.Session, inConfig *rest.Config) (dynamic.Interface // Version: "v3", // Resource: "users", // } -func (d *Client) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { +func (d *Client) Resource(resource schema.GroupVersionResource) *NamespaceableResourceClient { return &NamespaceableResourceClient{ NamespaceableResourceInterface: d.Interface.Resource(resource), ts: d.ts, @@ -59,7 +59,7 @@ type NamespaceableResourceClient struct { } // Namespace returns a dynamic.ResourceInterface that is embedded in ResourceClient, so ultimately its Create is overwritten. -func (d *NamespaceableResourceClient) Namespace(s string) dynamic.ResourceInterface { +func (d *NamespaceableResourceClient) Namespace(s string) *ResourceClient { return &ResourceClient{ ResourceInterface: d.NamespaceableResourceInterface.Namespace(s), ts: d.ts, diff --git a/clients/rancher/client.go b/clients/rancher/client.go index a4d120f9..5a55ef5b 100644 --- a/clients/rancher/client.go +++ b/clients/rancher/client.go @@ -16,6 +16,7 @@ import ( management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" + rancherDynamic "github.com/rancher/shepherd/clients/dynamic" kubeProvisioning "github.com/rancher/shepherd/clients/provisioning" "github.com/rancher/shepherd/clients/ranchercli" kubeRKE "github.com/rancher/shepherd/clients/rke" @@ -27,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -222,7 +222,7 @@ func (c *Client) GetClusterCatalogClient(clusterID string) (*catalog.Client, err } // GetRancherDynamicClient is a helper function that instantiates a dynamic client to communicate with the rancher host. -func (c *Client) GetRancherDynamicClient() (dynamic.Interface, error) { +func (c *Client) GetRancherDynamicClient() (*rancherDynamic.Client, error) { dynamic, err := frameworkDynamic.NewForConfig(c.Session, c.restConfig) if err != nil { return nil, err @@ -251,7 +251,7 @@ func (c *Client) GetKubeAPIRKEClient() (*kubeRKE.Client, error) { } // GetDownStreamClusterClient is a helper function that instantiates a dynamic client to communicate with a specific cluster. 
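With the wrappers now returning concrete types, existing call chains keep compiling while picking up the session-tracked cleanup behavior. A minimal usage sketch of that pattern, assuming an authenticated *rancher.Client; the helper name listCattleSystemPodNames and the cattle-system namespace are illustrative only:

package example

import (
	"context"

	"github.com/rancher/shepherd/clients/rancher"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listCattleSystemPodNames is a hypothetical helper showing the call chain against
// the concrete client returned by GetRancherDynamicClient: Resource and Namespace
// now return wrapper types, but List behaves as before.
func listCattleSystemPodNames(client *rancher.Client) ([]string, error) {
	dynamicClient, err := client.GetRancherDynamicClient()
	if err != nil {
		return nil, err
	}

	podGVR := corev1.SchemeGroupVersion.WithResource("pods")
	pods, err := dynamicClient.Resource(podGVR).Namespace("cattle-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(pods.Items))
	for _, pod := range pods.Items {
		names = append(names, pod.GetName())
	}
	return names, nil
}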
-func (c *Client) GetDownStreamClusterClient(clusterID string) (dynamic.Interface, error) { +func (c *Client) GetDownStreamClusterClient(clusterID string) (*rancherDynamic.Client, error) { restConfig := *c.restConfig restConfig.Host = fmt.Sprintf("https://%s/k8s/clusters/%s", c.restConfig.Host, clusterID) @@ -263,7 +263,7 @@ func (c *Client) GetDownStreamClusterClient(clusterID string) (dynamic.Interface } // SwitchContext is a helper function that changes the current context to `context` and instantiates a dynamic client -func (c *Client) SwitchContext(context string, clientConfig *clientcmd.ClientConfig) (dynamic.Interface, error) { +func (c *Client) SwitchContext(context string, clientConfig *clientcmd.ClientConfig) (*rancherDynamic.Client, error) { overrides := clientcmd.ConfigOverrides{CurrentContext: context} rawConfig, err := (*clientConfig).RawConfig() diff --git a/extensions/clusters/clusters.go b/extensions/clusters/clusters.go index f6a0cd81..f2fb095d 100644 --- a/extensions/clusters/clusters.go +++ b/extensions/clusters/clusters.go @@ -1278,6 +1278,43 @@ func WatchAndWaitForCluster(client *rancher.Client, steveID string) error { return err } +// WatchAndWaitForClusterWithTimeout is a function that waits for a cluster to go inactive before checking its active state, honoring the supplied watch timeout. +func WatchAndWaitForClusterWithTimeout(client *rancher.Client, steveID string, timeoutSeconds *int64) error { + var clusterResp *v1.SteveAPIObject + err := kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) { + clusterResp, err = client.Steve.SteveType(ProvisioningSteveResourceType).ByID(steveID) + if err != nil { + return false, err + } + state := clusterResp.ObjectMeta.State.Name + return state != "active", nil + }) + if err != nil { + return err + } + logrus.Infof("waiting for cluster to be up.............") + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + if err != nil { + return err + } + kubeProvisioningClient, err := adminClient.GetKubeAPIProvisioningClient() + if err != nil { + return err + } + + result, err := kubeProvisioningClient.Clusters(clusterResp.ObjectMeta.Namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + clusterResp.Name, + TimeoutSeconds: timeoutSeconds, + }) + if err != nil { + return err + } + + err = wait.WatchWait(result, IsProvisioningClusterReady) + return err +} + // GetProvisioningClusterByName is a helper function to get cluster object with the cluster name func GetProvisioningClusterByName(client *rancher.Client, clusterName string, namespace string) (*apisV1.Cluster, *v1.SteveAPIObject, error) { clusterObj, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(namespace + "/" + clusterName) @@ -1317,6 +1354,28 @@ func WaitForActiveRKE1Cluster(client *rancher.Client, clusterID string) error { return nil } +// WaitForActiveRKE1ClusterWithTimeout is a helper function that polls until the RKE1 cluster is active or the given number of minutes has elapsed. +func WaitForActiveRKE1ClusterWithTimeout(client *rancher.Client, clusterID string, timeoutMinutes int) error { + err := kwait.Poll(500*time.Millisecond, time.Duration(timeoutMinutes)*time.Minute, func() (done bool, err error) { + client, err = client.ReLogin() + if err != nil { + return false, err + } + clusterResp, err := client.Management.Cluster.ByID(clusterID) + if err != nil { + return false, err + } + if clusterResp.State == active { + return true, nil + } + return false, nil + }) + if err != nil { + return err + } + return nil +} + // ListDownstreamClusters is a helper function to get the name of the downstream clusters func ListDownstreamClusters(client *rancher.Client) (clusterNames []string, err
error) { clusterList, err := client.Steve.SteveType(ProvisioningSteveResourceType).ListAll(nil) diff --git a/extensions/codecoverage/codecoverage.go b/extensions/codecoverage/codecoverage.go deleted file mode 100644 index 084d6019..00000000 --- a/extensions/codecoverage/codecoverage.go +++ /dev/null @@ -1,185 +0,0 @@ -package codecoverage - -import ( - "context" - "fmt" - "strings" - "time" - - apiv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" - "github.com/rancher/shepherd/clients/rancher" - v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/kubeconfig" - "github.com/rancher/shepherd/pkg/killserver" - "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - k8sErrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kwait "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" -) - -var podGroupVersionResource = corev1.SchemeGroupVersion.WithResource("pods") - -const ( - cattleSystemNameSpace = "cattle-system" - localCluster = "local" - rancherCoverFile = "ranchercoverage" - agentCoverFile = "agentcoverage" - outputDir = "cover" -) - -func checkServiceIsRunning(dynamicClient dynamic.Interface) error { - return kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) { - _, err = dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) - if k8sErrors.IsInternalError(err) || k8sErrors.IsServiceUnavailable(err) { - return false, nil - } else if err != nil { - return false, err - } - return true, nil - }) -} - -func killTestServices(client *rancher.Client, clusterID string, podNames []string) error { - cmd := []string{ - "/bin/sh", - "-c", - fmt.Sprintf("curl -s localhost%s", killserver.Port), - } - - kubeConfig, err := kubeconfig.GetKubeconfig(client, clusterID) - if err != nil { - return err - } - - restConfig, err := (*kubeConfig).ClientConfig() - if err != nil { - return err - } - - for _, podName := range podNames { - _, err := kubeconfig.KubectlExec(restConfig, podName, cattleSystemNameSpace, cmd) - if err != nil { - logrus.Errorf("error killing pod container %v", err) - } - } - - return nil -} - -func retrieveCodeCoverageFile(client *rancher.Client, clusterID, coverageFilename string, podNames []string) error { - kubeConfig, err := kubeconfig.GetKubeconfig(client, clusterID) - if err != nil { - return err - } - - restConfig, err := (*kubeConfig).ClientConfig() - if err != nil { - return err - } - - for _, podName := range podNames { - fileName := fmt.Sprintf("%s%s", podName, coverageFilename) - dst := fmt.Sprintf("%s/%s", outputDir, fileName) - - err := kubeconfig.CopyFileFromPod(restConfig, *kubeConfig, podName, cattleSystemNameSpace, coverageFilename, dst) - if err != nil { - return err - } - } - - return nil -} - -// KillRancherTestServicesRetrieveCoverage is a function that kills the rancher service of the local cluster -// inorder for the code coverage report to be written, and then copies over the coverage reports from the pods -// to a local destination. The custom code coverage rancher image must be running in the local cluster. 
-func KillRancherTestServicesRetrieveCoverage(client *rancher.Client) error { - var podNames []string - dynamicClient, err := client.GetRancherDynamicClient() - if err != nil { - return err - } - - pods, err := dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) - if err != nil { - return err - } - - for _, pod := range pods.Items { - name := pod.GetName() - if strings.Contains(name, "rancher") && !strings.Contains(name, "webhook") { - podNames = append(podNames, pod.GetName()) - } - } - - err = killTestServices(client, localCluster, podNames) - if err != nil { - return err - } - - err = checkServiceIsRunning(dynamicClient) - if err != nil { - return err - } - - return retrieveCodeCoverageFile(client, localCluster, rancherCoverFile, podNames) -} - -// KillAgentTestServicesRetrieveCoverage is a function that kills the cattle-cluster-agent service of a downstream cluster -// inorder for the code coverage report to be written, and then copies over the coverage reports from the pods -// to a local destination. The custom code coverage rancher-agent image must be running in the downstream cluster. -func KillAgentTestServicesRetrieveCoverage(client *rancher.Client) error { - clusters, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ListAll(nil) - if err != nil { - return err - } - - for _, cluster := range clusters.Data { - clusterStatus := &apiv1.ClusterStatus{} - err = v1.ConvertToK8sType(cluster.Status, clusterStatus) - if err != nil { - return err - } - clusterID := clusterStatus.ClusterName - if clusterID != localCluster { - dynamicClient, err := client.GetDownStreamClusterClient(clusterID) - if err != nil { - logrus.Errorf("could not connect to downstream cluster") - continue - } - - pods, err := dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) - if err != nil { - logrus.Errorf("could not list pods") - continue - } - - var podNames []string - for _, pod := range pods.Items { - if strings.Contains(pod.GetName(), "cattle-cluster-agent") { - podNames = append(podNames, pod.GetName()) - } - } - - err = killTestServices(client, clusterID, podNames) - if err != nil { - return err - } - - err = checkServiceIsRunning(dynamicClient) - if err != nil { - return err - } - - err = retrieveCodeCoverageFile(client, clusterID, agentCoverFile, podNames) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/extensions/kubeapi/cluster/operations.go b/extensions/kubeapi/cluster/operations.go new file mode 100644 index 00000000..4016f5e2 --- /dev/null +++ b/extensions/kubeapi/cluster/operations.go @@ -0,0 +1,34 @@ +package cluster + +import ( + "context" + + "github.com/rancher/shepherd/clients/rancher" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ManagementGroupName = "management.cattle.io" + +func ManagementClusterGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: ManagementGroupName, Version: "v3", Resource: "clusters"} +} + +// ListAll is a helper function that uses the dynamic client to return a list of all clusters.management.cattle.io. 
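A usage sketch for the ListAll helper defined just below, assuming an authenticated *rancher.Client; the wrapper name managementClusterNames is invented for illustration and simply walks the unstructured items it returns:

package example

import (
	"github.com/rancher/shepherd/clients/rancher"
	"github.com/rancher/shepherd/extensions/kubeapi/cluster"
)

// managementClusterNames is a hypothetical example that lists every
// clusters.management.cattle.io object and collects its metadata.name.
func managementClusterNames(client *rancher.Client) ([]string, error) {
	clusterList, err := cluster.ListAll(client, nil) // nil falls back to empty ListOptions
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(clusterList.Items))
	for _, item := range clusterList.Items {
		names = append(names, item.GetName())
	}
	return names, nil
}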
+func ListAll(client *rancher.Client, opts *metav1.ListOptions) (list *unstructured.UnstructuredList, err error) { + if opts == nil { + opts = &metav1.ListOptions{} + } + + dynamic, err := client.GetRancherDynamicClient() + if err != nil { + return + } + + unstructuredClusterList, err := dynamic.Resource(ManagementClusterGVR()).List(context.TODO(), *opts) + if err != nil { + return + } + return unstructuredClusterList, err +} diff --git a/extensions/kubeapi/cluster/summary.go b/extensions/kubeapi/cluster/summary.go index 50b86669..e762223f 100644 --- a/extensions/kubeapi/cluster/summary.go +++ b/extensions/kubeapi/cluster/summary.go @@ -6,7 +6,6 @@ import ( "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/wrangler/pkg/summary" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) // IsClusterActive is a helper function that uses the dynamic client to return cluster's ready state. @@ -16,7 +15,7 @@ func IsClusterActive(client *rancher.Client, clusterID string) (ready bool, err return } - unstructuredCluster, err := dynamic.Resource(schema.GroupVersionResource{Group: "management.cattle.io", Version: "v3", Resource: "clusters"}).Get(context.TODO(), clusterID, metav1.GetOptions{}) + unstructuredCluster, err := dynamic.Resource(ManagementClusterGVR()).Get(context.TODO(), clusterID, metav1.GetOptions{}) if err != nil { return } diff --git a/extensions/kubeapi/configmaps/configmaps.go b/extensions/kubeapi/configmaps/configmaps.go index 51509efe..8a31799e 100644 --- a/extensions/kubeapi/configmaps/configmaps.go +++ b/extensions/kubeapi/configmaps/configmaps.go @@ -9,6 +9,16 @@ import ( coreV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" +) + +type PatchOP string + +const ( + AddPatchOP PatchOP = "add" + ReplacePatchOP PatchOP = "replace" + RemovePatchOP PatchOP = "remove" ) // ConfigMapGroupVersionResource is the required Group Version Resource for accessing config maps in a cluster, @@ -21,7 +31,7 @@ var ConfigMapGroupVersionResource = schema.GroupVersionResource{ // CreateConfigMap is a helper function that uses the dynamic client to create a config map on a namespace for a specific cluster. // It registers a delete fuction. 
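The PatchOP constants above mirror JSON Patch operation names. A minimal sketch of building a one-operation JSON Patch with them and feeding it to the PatchConfigMap helper added further down in this file; the function name replaceConfigMapKey is invented, and the /data path assumes the key already exists (as JSON Patch replace requires):

package example

import (
	"fmt"

	"github.com/rancher/shepherd/clients/rancher"
	"github.com/rancher/shepherd/extensions/kubeapi/configmaps"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// replaceConfigMapKey is a hypothetical example that replaces a single data key
// in a downstream cluster's config map via a JSON Patch document.
func replaceConfigMapKey(client *rancher.Client, clusterID, name, namespace, key, value string) (*corev1.ConfigMap, error) {
	patch := fmt.Sprintf(`[{"op": %q, "path": "/data/%s", "value": %q}]`, configmaps.ReplacePatchOP, key, value)
	return configmaps.PatchConfigMap(client, clusterID, name, namespace, patch, types.JSONPatchType)
}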
-func CreateConfigMap(client *rancher.Client, clusterName, configMapName, description, namespace string, data, labels, annotations map[string]string) (*coreV1.ConfigMap, error) { +func CreateConfigMap(client *rancher.Client, clusterID, configMapName, description, namespace string, data, labels, annotations map[string]string) (*coreV1.ConfigMap, error) { // ConfigMap object for a namespace in a cluster annotations["field.cattle.io/description"] = description configMap := &coreV1.ConfigMap{ @@ -34,7 +44,7 @@ func CreateConfigMap(client *rancher.Client, clusterName, configMapName, descrip Data: data, } - dynamicClient, err := client.GetDownStreamClusterClient(clusterName) + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) if err != nil { return nil, err } @@ -76,3 +86,90 @@ func NewConfigmapTemplate(configmapName, namespace string, annotations, labels, Data: data, } } + +func ListConfigMaps(client *rancher.Client, clusterID, namespace string, opts metav1.ListOptions) (*coreV1.ConfigMapList, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + unstructuredResp, err := configMapResource.List(context.TODO(), opts) + if err != nil { + return nil, err + } + + newConfigMapList := &coreV1.ConfigMapList{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMapList, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMapList, nil +} + +func GetConfigMapByName(client *rancher.Client, clusterID, configMapName, namespace string, getOpts metav1.GetOptions) (*coreV1.ConfigMap, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + unstructuredResp, err := configMapResource.Get(context.TODO(), configMapName, getOpts) + if err != nil { + return nil, err + } + + newConfigMap := &coreV1.ConfigMap{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMap, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMap, nil +} + +func PatchConfigMap(client *rancher.Client, clusterID, configMapName, namespace string, data string, patchType types.PatchType) (*coreV1.ConfigMap, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + + unstructuredResp, err := configMapResource.Patch(context.TODO(), configMapName, patchType, []byte(data), metav1.PatchOptions{}) + if err != nil { + return nil, err + } + + newConfigMap := &coreV1.ConfigMap{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMap, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMap, nil +} + +// PatchConfigMapFromYAML is a helper function that uses the dynamic client to patch a configMap in a namespace for a specific cluster. +// Different merge strategies are supported based on the PatchType. 
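A short usage sketch for the helper defined just below; mergePatchConfigMapData and the log-level key are invented for illustration. The YAML fragment is converted to JSON by the helper and, with a strategic merge patch type, only the listed data keys are added or updated:

package example

import (
	"github.com/rancher/shepherd/clients/rancher"
	"github.com/rancher/shepherd/extensions/kubeapi/configmaps"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// mergePatchConfigMapData is a hypothetical example that patches a config map's
// data section from a small YAML snippet rather than a hand-built JSON payload.
func mergePatchConfigMapData(client *rancher.Client, clusterID, name, namespace string) (*corev1.ConfigMap, error) {
	rawYAML := []byte("data:\n  log-level: debug\n")
	return configmaps.PatchConfigMapFromYAML(client, clusterID, name, namespace, rawYAML, types.StrategicMergePatchType)
}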
+func PatchConfigMapFromYAML(client *rancher.Client, clusterID, configMapName, namespace string, rawYAML []byte, patchType types.PatchType) (*coreV1.ConfigMap, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + + rawJSON, err := yaml.ToJSON(rawYAML) + if err != nil { + return nil, err + } + + unstructuredResp, err := configMapResource.Patch(context.TODO(), configMapName, patchType, rawJSON, metav1.PatchOptions{}) + if err != nil { + return nil, err + } + + newConfigMap := &coreV1.ConfigMap{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMap, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMap, nil +} diff --git a/extensions/kubeconfig/exec.go b/extensions/kubeconfig/exec.go index 6a6838ff..fc0763ce 100644 --- a/extensions/kubeconfig/exec.go +++ b/extensions/kubeconfig/exec.go @@ -2,6 +2,7 @@ package kubeconfig import ( "bytes" + "context" "fmt" "os" "strings" @@ -73,7 +74,7 @@ func KubectlExec(restConfig *restclient.Config, podName, namespace string, comma } logStreamer := &LogStreamer{} - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ Stdin: nil, Stdout: logStreamer, Stderr: os.Stderr, diff --git a/extensions/kubeconfig/kubeconfig.go b/extensions/kubeconfig/kubeconfig.go index c5f2ee94..f54e02cf 100644 --- a/extensions/kubeconfig/kubeconfig.go +++ b/extensions/kubeconfig/kubeconfig.go @@ -2,8 +2,10 @@ package kubeconfig import ( "errors" + "os" "github.com/rancher/shepherd/clients/rancher" + "github.com/sirupsen/logrus" "k8s.io/client-go/tools/clientcmd" ) @@ -33,3 +35,28 @@ func GetKubeconfig(client *rancher.Client, clusterID string) (*clientcmd.ClientC return &cfg, nil } + +func WriteKubeconfigToFile(client *rancher.Client, clusterID, path string) error { + cluster, err := client.Management.Cluster.ByID(clusterID) + if err != nil { + return err + } + + kubeConfig, err := client.Management.Cluster.ActionGenerateKubeconfig(cluster) + if err != nil { + return err + } + + _, statErr := os.Stat(path) + if statErr == nil { + err = os.Remove(path) + } + + if f, err := os.Create(path); err == nil { + if _, err := f.Write([]byte(kubeConfig.Config)); err != nil { + return err + } + } + logrus.Infof("Finished writing. Err: %v", err) + return err +} diff --git a/extensions/kubeconfig/podlogs.go b/extensions/kubeconfig/podlogs.go index 414787cb..5856d824 100644 --- a/extensions/kubeconfig/podlogs.go +++ b/extensions/kubeconfig/podlogs.go @@ -4,17 +4,22 @@ import ( "bufio" "context" "fmt" + "io" + "os" "strconv" "strings" "github.com/rancher/shepherd/clients/rancher" + "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/serializer" k8Scheme "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" ) -// GetPodLogs fetches logs from a Kubernetes pod. Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string causes no buffering. 
+// GetPodLogs fetches logs from a Kubernetes pod +// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes +// returns a string of all logs read and an error if any func GetPodLogs(client *rancher.Client, clusterID string, podName string, namespace string, bufferSizeStr string) (string, error) { var restConfig *restclient.Config @@ -73,10 +78,71 @@ func GetPodLogs(client *rancher.Client, clusterID string, podName string, namesp return logs, nil } +// GetPodLogsWithOpts fetches logs from a Kubernetes pod and allows +// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes +// returns a string of all logs read and an error if any +func GetPodLogsWithOpts(client *rancher.Client, clusterID string, podName string, namespace string, bufferSizeStr string, opts *corev1.PodLogOptions) (string, error) { + var restConfig *restclient.Config + + kubeConfig, err := GetKubeconfig(client, clusterID) + if err != nil { + return "", err + } + + restConfig, err = (*kubeConfig).ClientConfig() + if err != nil { + return "", err + } + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) + restConfig.ContentConfig.GroupVersion = &podGroupVersion + restConfig.APIPath = apiPath + + restClient, err := restclient.RESTClientFor(restConfig) + if err != nil { + return "", err + } + + req := restClient.Get().Resource("pods").Name(podName).Namespace(namespace).SubResource("log") + req.VersionedParams( + opts, + k8Scheme.ParameterCodec, + ) + + stream, err := req.Stream(context.TODO()) + if err != nil { + return "", fmt.Errorf("error streaming pod logs for pod %s/%s: %v", namespace, podName, err) + } + + defer stream.Close() + + reader := bufio.NewScanner(stream) + + if bufferSizeStr != "" { + bufferSize, err := parseBufferSize(bufferSizeStr) + if err != nil { + return "", fmt.Errorf("error in parseBufferSize: %v", err) + } + + buf := make([]byte, bufferSize) + reader.Buffer(buf, bufferSize) + } + + var logs string + for reader.Scan() { + logs = logs + fmt.Sprintf("%s\n", reader.Text()) + logrus.Info(reader.Text()) + } + + if err := reader.Err(); err != nil { + return "", fmt.Errorf("error reading pod logs for pod %s/%s: %v", namespace, podName, err) + } + return logs, nil +} + // parseBufferSize is a helper function that parses a size string and returns // the equivalent size in bytes. The provided size string should end with a // suffix of 'KB', 'MB', or 'GB'. If no suffix is provided, the function will -// return an error. 
+// return an int of the buffer size and an error if any func parseBufferSize(sizeStr string) (int, error) { sizeStr = strings.ToUpper(sizeStr) var mult int @@ -101,3 +167,89 @@ func parseBufferSize(sizeStr string) (int, error) { return size * mult, nil } + +// GetPodLogsWithContext fetches logs from a Kubernetes pod +// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes +// returns a string of all logs read and an error if any +func GetPodLogsWithContext(ctx context.Context, client *rancher.Client, clusterID, podName, namespace, bufferSizeStr, logFilePath string, opts *corev1.PodLogOptions) (string, error) { + var restConfig *restclient.Config + + kubeConfig, err := GetKubeconfig(client, clusterID) + if err != nil { + return "", err + } + + restConfig, err = (*kubeConfig).ClientConfig() + if err != nil { + return "", err + } + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) + restConfig.ContentConfig.GroupVersion = &podGroupVersion + restConfig.APIPath = apiPath + + restClient, err := restclient.RESTClientFor(restConfig) + if err != nil { + return "", err + } + + req := restClient.Get().Resource("pods").Name(podName).Namespace(namespace).SubResource("log") + req.VersionedParams( + opts, + k8Scheme.ParameterCodec, + ) + + stream, err := req.Stream(context.Background()) + if err != nil { + return "", fmt.Errorf("error streaming pod logs for pod %s/%s: %v", namespace, podName, err) + } + defer stream.Close() + + logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) + if err != nil { + return "", fmt.Errorf("error opening log file: %v", err) + } + defer logFile.Close() + + return readAndWriteLogsWithContext(ctx, stream, logFile, bufferSizeStr) +} + +// readAndWriteLogsWithContext is a helper function that reads and writes text to console output and the specific logFile using a channel +// - filters out log lines containing "debug" +// - if the context is canceled before all logs are read, the function returns immediately with the logs read so far and the context's error +// +// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes +// returns a string of all logs read and an error if any +func readAndWriteLogsWithContext(ctx context.Context, stream io.ReadCloser, logFile *os.File, bufferSizeStr string) (string, error) { + logs := &strings.Builder{} + defer logFile.Close() + + scanner := bufio.NewScanner(stream) + if bufferSizeStr != "" { + bufferSize, err := parseBufferSize(bufferSizeStr) + if err != nil { + return "", fmt.Errorf("error in parseBufferSize: %v", err) + } + + buf := make([]byte, bufferSize) + scanner.Buffer(buf, bufferSize) + } + + for scanner.Scan() { + select { + case <-ctx.Done(): + return logs.String(), ctx.Err() // Return immediately if context is canceled + default: + logLine := scanner.Text() + if !strings.Contains(strings.ToLower(logLine), "debug") && strings.TrimSpace(logLine) != "" { + logrus.Info(logLine) // Write log + } + fmt.Fprintln(logFile, logLine) // Write log to file + logs.WriteString(logLine + "\n") + } + } + if err := scanner.Err(); err != nil { + return logs.String(), fmt.Errorf("error reading logs: %v", err) + } + + return logs.String(), nil +} diff --git a/extensions/kubeconfig/pods.go b/extensions/kubeconfig/pods.go new file mode 100644 index 00000000..1eb03e9c --- /dev/null +++ b/extensions/kubeconfig/pods.go @@ -0,0 +1,57 @@ 
+package kubeconfig + +import ( + "context" + "errors" + + "github.com/rancher/shepherd/clients/rancher" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes" + k8Scheme "k8s.io/client-go/kubernetes/scheme" +) + +func GetPods(client *rancher.Client, clusterID string, namespace string, listOptions *metav1.ListOptions) ([]corev1.Pod, error) { + + kubeConfig, err := GetKubeconfig(client, clusterID) + if err != nil { + return nil, err + } + + restConfig, err := (*kubeConfig).ClientConfig() + if err != nil { + return nil, err + } + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) + restConfig.ContentConfig.GroupVersion = &podGroupVersion + restConfig.APIPath = apiPath + + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, err + } + + pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), *listOptions) + if err != nil { + return nil, err + } + return pods.Items, nil +} + +func GetPodNames(client *rancher.Client, clusterID string, namespace string, listOptions *metav1.ListOptions) ([]string, error) { + pods, err := GetPods(client, clusterID, namespace, listOptions) + if err != nil { + return nil, err + } + + var names []string + for _, pod := range pods { + names = append(names, pod.Name) + } + if len(names) == 0 { + return nil, errors.New("GetPodNames: no pod names found") + } + + return names, nil +} diff --git a/extensions/provisioning/creates.go b/extensions/provisioning/creates.go deleted file mode 100644 index 09d4ef1a..00000000 --- a/extensions/provisioning/creates.go +++ /dev/null @@ -1,1036 +0,0 @@ -package provisioning - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/rancher/norman/types" - - "github.com/sirupsen/logrus" - - "github.com/rancher/shepherd/clients/corral" - "github.com/rancher/shepherd/clients/rancher" - - apiv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" - rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" - - v1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/cloudcredentials/aws" - "github.com/rancher/shepherd/extensions/cloudcredentials/azure" - "github.com/rancher/shepherd/extensions/cloudcredentials/google" - "github.com/rancher/shepherd/extensions/cloudcredentials/vsphere" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/aks" - "github.com/rancher/shepherd/extensions/clusters/eks" - "github.com/rancher/shepherd/extensions/clusters/gke" - "github.com/rancher/shepherd/extensions/defaults" - "github.com/rancher/shepherd/extensions/etcdsnapshot" - k3sHardening "github.com/rancher/shepherd/extensions/hardening/k3s" - rke1Hardening "github.com/rancher/shepherd/extensions/hardening/rke1" - rke2Hardening "github.com/rancher/shepherd/extensions/hardening/rke2" - "github.com/rancher/shepherd/extensions/machinepools" - nodestat "github.com/rancher/shepherd/extensions/nodes" - "github.com/rancher/shepherd/extensions/pipeline" - "github.com/rancher/shepherd/extensions/provisioninginput" - nodepools "github.com/rancher/shepherd/extensions/rke1/nodepools" - "github.com/rancher/shepherd/extensions/rke1/nodetemplates" - "github.com/rancher/shepherd/extensions/secrets" - "github.com/rancher/shepherd/extensions/tokenregistration" - "github.com/rancher/shepherd/pkg/environmentflag" - namegen "github.com/rancher/shepherd/pkg/namegenerator" - 
"github.com/rancher/shepherd/pkg/nodes" - "github.com/rancher/shepherd/pkg/wait" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kwait "k8s.io/apimachinery/pkg/util/wait" - - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" -) - -const ( - active = "active" - internalIP = "alpha.kubernetes.io/provided-node-ip" - rke1ExternalIP = "rke.cattle.io/external-ip" - namespace = "fleet-default" - - rke2k3sAirgapCustomCluster = "rke2k3sairgapcustomcluster" - rke2k3sNodeCorralName = "rke2k3sregisterNode" - corralPackageAirgapCustomClusterName = "airgapCustomCluster" - rke1AirgapCustomCluster = "rke1airgapcustomcluster" - rke1NodeCorralName = "rke1registerNode" -) - -// CreateProvisioningCluster provisions a non-rke1 cluster, then runs verify checks -func CreateProvisioningCluster(client *rancher.Client, provider Provider, clustersConfig *clusters.ClusterConfig, hostnameTruncation []machinepools.HostnameTruncation) (*v1.SteveAPIObject, error) { - cloudCredential, err := provider.CloudCredFunc(client) - if err != nil { - return nil, err - } - - if clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - err = clusters.CreateRancherBaselinePSACT(client, clustersConfig.PSACT) - if err != nil { - return nil, err - } - } - - clusterName := namegen.AppendRandomString(provider.Name.String()) - generatedPoolName := fmt.Sprintf("nc-%s-pool1-", clusterName) - machinePoolConfigs := provider.MachinePoolFunc(generatedPoolName, namespace) - - var machinePoolResponses []v1.SteveAPIObject - - for _, machinePoolConfig := range machinePoolConfigs { - machinePoolConfigResp, err := client.Steve. - SteveType(provider.MachineConfigPoolResourceSteveType). - Create(&machinePoolConfig) - if err != nil { - return nil, err - } - machinePoolResponses = append(machinePoolResponses, *machinePoolConfigResp) - } - - if clustersConfig.Registries != nil { - if clustersConfig.Registries.RKE2Registries != nil { - if clustersConfig.Registries.RKE2Username != "" && clustersConfig.Registries.RKE2Password != "" { - steveClient, err := client.Steve.ProxyDownstream("local") - if err != nil { - return nil, err - } - - secretName := fmt.Sprintf("priv-reg-sec-%s", clusterName) - secretTemplate := secrets.NewSecretTemplate(secretName, namespace, map[string][]byte{ - "password": []byte(clustersConfig.Registries.RKE2Password), - "username": []byte(clustersConfig.Registries.RKE2Username), - }, - corev1.SecretTypeBasicAuth, - ) - - registrySecret, err := steveClient.SteveType(secrets.SecretSteveType).Create(secretTemplate) - if err != nil { - return nil, err - } - - for registryName, registry := range clustersConfig.Registries.RKE2Registries.Configs { - registry.AuthConfigSecretName = registrySecret.Name - clustersConfig.Registries.RKE2Registries.Configs[registryName] = registry - } - } - } - } - - var machineConfigs []machinepools.MachinePoolConfig - var pools []machinepools.Pools - for _, pool := range clustersConfig.MachinePools { - machineConfigs = append(machineConfigs, pool.MachinePoolConfig) - pools = append(pools, pool.Pools) - } - - machinePools := machinepools. 
- CreateAllMachinePools(machineConfigs, pools, machinePoolResponses, provider.Roles, hostnameTruncation) - - if clustersConfig.CloudProvider == provisioninginput.VsphereCloudProviderName.String() { - - vcenterCredentials := map[string]interface{}{ - "datacenters": machinePoolConfigs[0].Object["datacenter"], - "host": cloudCredential.VmwareVsphereConfig.Vcenter, - "password": vsphere.GetVspherePassword(), - "username": cloudCredential.VmwareVsphereConfig.Username, - } - clustersConfig.AddOnConfig = &provisioninginput.AddOnConfig{ - ChartValues: &rkev1.GenericMap{ - Data: map[string]interface{}{ - "rancher-vsphere-cpi": map[string]interface{}{ - "vCenter": vcenterCredentials, - }, - "rancher-vsphere-csi": map[string]interface{}{ - "storageClass": map[string]interface{}{ - "datastoreURL": machinePoolConfigs[0].Object["datastoreUrl"], - }, - "vCenter": vcenterCredentials, - }, - }, - }, - } - } - - cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespace, clustersConfig, machinePools, cloudCredential.ID) - - for _, truncatedPool := range hostnameTruncation { - if truncatedPool.PoolNameLengthLimit > 0 || truncatedPool.ClusterNameLengthLimit > 0 { - cluster.GenerateName = "t-" - if truncatedPool.ClusterNameLengthLimit > 0 { - cluster.Spec.RKEConfig.MachinePoolDefaults.HostnameLengthLimit = truncatedPool.ClusterNameLengthLimit - } - - break - } - } - - _, err = clusters.CreateK3SRKE2Cluster(client, cluster) - if err != nil { - return nil, err - } - - if client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - if err != nil { - return nil, err - } - - createdCluster, err := adminClient.Steve. - SteveType(clusters.ProvisioningSteveResourceType). 
- ByID(namespace + "/" + clusterName) - - return createdCluster, err -} - -// CreateProvisioningCustomCluster provisions a non-rke1 cluster using a 3rd party client for its nodes, then runs verify checks -func CreateProvisioningCustomCluster(client *rancher.Client, externalNodeProvider *ExternalNodeProvider, clustersConfig *clusters.ClusterConfig) (*v1.SteveAPIObject, error) { - setLogrusFormatter() - rolesPerNode := []string{} - quantityPerPool := []int32{} - rolesPerPool := []string{} - for _, pool := range clustersConfig.MachinePools { - var finalRoleCommand string - if pool.MachinePoolConfig.ControlPlane { - finalRoleCommand += " --controlplane" - } - - if pool.MachinePoolConfig.Etcd { - finalRoleCommand += " --etcd" - } - - if pool.MachinePoolConfig.Worker { - finalRoleCommand += " --worker" - } - - if pool.MachinePoolConfig.Windows { - finalRoleCommand += " --windows" - } - - quantityPerPool = append(quantityPerPool, pool.MachinePoolConfig.Quantity) - rolesPerPool = append(rolesPerPool, finalRoleCommand) - for i := int32(0); i < pool.MachinePoolConfig.Quantity; i++ { - rolesPerNode = append(rolesPerNode, finalRoleCommand) - } - } - - if clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - err := clusters.CreateRancherBaselinePSACT(client, clustersConfig.PSACT) - if err != nil { - return nil, err - } - } - - nodes, err := externalNodeProvider.NodeCreationFunc(client, rolesPerPool, quantityPerPool) - if err != nil { - return nil, err - } - - clusterName := namegen.AppendRandomString(externalNodeProvider.Name) - - cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespace, clustersConfig, nil, "") - - if clustersConfig.Hardened && strings.Contains(clustersConfig.KubernetesVersion, clusters.RKE2ClusterType.String()) { - err = rke2Hardening.HardenRKE2Nodes(nodes, rolesPerNode) - if err != nil { - return nil, err - } - - cluster = clusters.HardenRKE2ClusterConfig(clusterName, namespace, clustersConfig, nil, "") - } - - clusterResp, err := clusters.CreateK3SRKE2Cluster(client, cluster) - if err != nil { - return nil, err - } - - if client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - client, err = client.ReLogin() - if err != nil { - return nil, err - } - - customCluster, err := client.Steve.SteveType(etcdsnapshot.ProvisioningSteveResouceType).ByID(clusterResp.ID) - if err != nil { - return nil, err - } - - clusterStatus := &apiv1.ClusterStatus{} - err = v1.ConvertToK8sType(customCluster.Status, clusterStatus) - if err != nil { - return nil, err - } - - token, err := tokenregistration.GetRegistrationToken(client, clusterStatus.ClusterName) - if err != nil { - return nil, err - } - - kubeProvisioningClient, err := client.GetKubeAPIProvisioningClient() - if err != nil { - return nil, err - } - - result, err := kubeProvisioningClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ - FieldSelector: "metadata.name=" + clusterName, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - if err != nil { - return nil, err - } - - checkFunc := clusters.IsProvisioningClusterReady - var command string - totalNodesObserved := 0 - for poolIndex, poolRole := range rolesPerPool { - if strings.Contains(poolRole, "windows") { - totalNodesObserved += int(quantityPerPool[poolIndex]) - continue - } - for nodeIndex := 0; nodeIndex < int(quantityPerPool[poolIndex]); nodeIndex++ { - node := nodes[totalNodesObserved+nodeIndex] - - logrus.Infof("Execute Registration Command for node %s", node.NodeID) - 
logrus.Infof("Linux pool detected, using bash...") - - command = fmt.Sprintf("%s %s", token.InsecureNodeCommand, poolRole) - if clustersConfig.MachinePools[poolIndex].IsSecure { - command = fmt.Sprintf("%s %s", token.NodeCommand, poolRole) - } - command = createRegistrationCommand(command, node.PublicIPAddress, node.PrivateIPAddress, clustersConfig.MachinePools[poolIndex]) - logrus.Infof("Command: %s", command) - - output, err := node.ExecuteCommand(command) - if err != nil { - return nil, err - } - logrus.Infof(output) - } - totalNodesObserved += int(quantityPerPool[poolIndex]) - } - - err = wait.WatchWait(result, checkFunc) - if err != nil { - return nil, err - } - totalNodesObserved = 0 - for poolIndex := 0; poolIndex < len(rolesPerPool); poolIndex++ { - if strings.Contains(rolesPerPool[poolIndex], "windows") { - for nodeIndex := 0; nodeIndex < int(quantityPerPool[poolIndex]); nodeIndex++ { - node := nodes[totalNodesObserved+nodeIndex] - - logrus.Infof("Execute Registration Command for node %s", node.NodeID) - logrus.Infof("Windows pool detected, using powershell.exe...") - command = fmt.Sprintf("powershell.exe %s ", token.InsecureWindowsNodeCommand) - if clustersConfig.MachinePools[poolIndex].IsSecure { - command = fmt.Sprintf("powershell.exe %s ", token.WindowsNodeCommand) - } - command = createWindowsRegistrationCommand(command, node.PublicIPAddress, node.PrivateIPAddress, clustersConfig.MachinePools[poolIndex]) - logrus.Infof("Command: %s", command) - - output, err := node.ExecuteCommand(command) - if err != nil { - return nil, err - } - logrus.Infof(output) - } - } - totalNodesObserved += int(quantityPerPool[poolIndex]) - } - - if clustersConfig.Hardened { - if strings.Contains(clustersConfig.KubernetesVersion, clusters.K3SClusterType.String()) { - err = k3sHardening.HardenK3SNodes(nodes, rolesPerNode, clustersConfig.KubernetesVersion) - if err != nil { - return nil, err - } - - hardenCluster := clusters.HardenK3SClusterConfig(clusterName, namespace, clustersConfig, nil, "") - - _, err := clusters.UpdateK3SRKE2Cluster(client, clusterResp, hardenCluster) - if err != nil { - return nil, err - } - } else { - err = rke2Hardening.PostRKE2HardeningConfig(nodes, rolesPerNode) - if err != nil { - return nil, err - } - } - } - - createdCluster, err := client.Steve. - SteveType(clusters.ProvisioningSteveResourceType). 
- ByID(namespace + "/" + clusterName) - return createdCluster, err -} - -// CreateProvisioningRKE1Cluster provisions an rke1 cluster, then runs verify checks -func CreateProvisioningRKE1Cluster(client *rancher.Client, provider RKE1Provider, clustersConfig *clusters.ClusterConfig, nodeTemplate *nodetemplates.NodeTemplate) (*management.Cluster, error) { - if clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - err := clusters.CreateRancherBaselinePSACT(client, clustersConfig.PSACT) - if err != nil { - return nil, err - } - } - - clusterName := namegen.AppendRandomString(provider.Name.String()) - cluster := clusters.NewRKE1ClusterConfig(clusterName, client, clustersConfig) - clusterResp, err := clusters.CreateRKE1Cluster(client, cluster) - if err != nil { - return nil, err - } - - if client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - var nodeRoles []nodepools.NodeRoles - for _, nodes := range clustersConfig.NodePools { - nodeRoles = append(nodeRoles, nodes.NodeRoles) - } - _, err = nodepools.NodePoolSetup(client, nodeRoles, clusterResp.ID, nodeTemplate.ID) - if err != nil { - return nil, err - } - - createdCluster, err := client.Management.Cluster.ByID(clusterResp.ID) - return createdCluster, err -} - -// CreateProvisioningRKE1CustomCluster provisions an rke1 cluster using a 3rd party client for its nodes, then runs verify checks -func CreateProvisioningRKE1CustomCluster(client *rancher.Client, externalNodeProvider *ExternalNodeProvider, clustersConfig *clusters.ClusterConfig) (*management.Cluster, []*nodes.Node, error) { - setLogrusFormatter() - quantityPerPool := []int32{} - rolesPerPool := []string{} - for _, pool := range clustersConfig.NodePools { - var finalRoleCommand string - if pool.NodeRoles.ControlPlane { - finalRoleCommand += " --controlplane" - } - if pool.NodeRoles.Etcd { - finalRoleCommand += " --etcd" - } - if pool.NodeRoles.Worker { - finalRoleCommand += " --worker" - } - - quantityPerPool = append(quantityPerPool, int32(pool.NodeRoles.Quantity)) - rolesPerPool = append(rolesPerPool, finalRoleCommand) - } - - if clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - err := clusters.CreateRancherBaselinePSACT(client, clustersConfig.PSACT) - if err != nil { - return nil, nil, err - } - } - - nodes, err := externalNodeProvider.NodeCreationFunc(client, rolesPerPool, quantityPerPool) - if err != nil { - return nil, nil, err - } - - clusterName := namegen.AppendRandomString(externalNodeProvider.Name) - - cluster := clusters.NewRKE1ClusterConfig(clusterName, client, clustersConfig) - - if clustersConfig.Hardened { - err = rke1Hardening.HardenRKE1Nodes(nodes, rolesPerPool) - if err != nil { - return nil, nil, err - } - - cluster = clusters.HardenRKE1ClusterConfig(client, clusterName, clustersConfig) - } - - clusterResp, err := clusters.CreateRKE1Cluster(client, cluster) - if err != nil { - return nil, nil, err - } - - if client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - client, err = client.ReLogin() - if err != nil { - return nil, nil, err - } - - customCluster, err := client.Management.Cluster.ByID(clusterResp.ID) - if err != nil { - return nil, nil, err - } - - token, err := tokenregistration.GetRegistrationToken(client, customCluster.ID) - if err != nil { - return nil, nil, err - } - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - if err != nil { - return nil, nil, err - } - - 
result, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ - FieldSelector: "metadata.name=" + customCluster.ID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - if err != nil { - return nil, nil, err - } - - checkFunc := clusters.IsHostedProvisioningClusterReady - - var command string - totalNodesObserved := 0 - for poolIndex, poolRole := range rolesPerPool { - for nodeIndex := 0; nodeIndex < int(quantityPerPool[poolIndex]); nodeIndex++ { - node := nodes[totalNodesObserved+nodeIndex] - - logrus.Infof("Execute Registration Command for node %s", node.NodeID) - logrus.Infof("Linux pool detected, using bash...") - - command = fmt.Sprintf("%s %s", token.NodeCommand, poolRole) - command = createRKE1RegistrationCommand(command, node.PublicIPAddress, node.PrivateIPAddress, clustersConfig.NodePools[poolIndex]) - logrus.Infof("Command: %s", command) - - output, err := node.ExecuteCommand(command) - if err != nil { - return nil, nil, err - } - logrus.Infof(output) - } - totalNodesObserved += int(quantityPerPool[poolIndex]) - } - - err = wait.WatchWait(result, checkFunc) - if err != nil { - return nil, nil, err - } - - if clustersConfig.Hardened { - err = rke1Hardening.PostRKE1HardeningConfig(nodes, rolesPerPool) - if err != nil { - return nil, nil, err - } - } - - createdCluster, err := client.Management.Cluster.ByID(clusterResp.ID) - - return createdCluster, nodes, err -} - -// CreateProvisioningAirgapCustomCluster provisions a non-rke1 cluster using corral to gather its nodes, then runs verify checks -func CreateProvisioningAirgapCustomCluster(client *rancher.Client, clustersConfig *clusters.ClusterConfig, corralPackages *corral.Packages) (*v1.SteveAPIObject, error) { - setLogrusFormatter() - rolesPerNode := map[int32]string{} - for _, pool := range clustersConfig.MachinePools { - var finalRoleCommand string - if pool.MachinePoolConfig.ControlPlane { - finalRoleCommand += " --controlplane" - } - if pool.MachinePoolConfig.Etcd { - finalRoleCommand += " --etcd" - } - if pool.MachinePoolConfig.Worker { - finalRoleCommand += " --worker" - } - if pool.MachinePoolConfig.Windows { - finalRoleCommand += " --windows" - } - - rolesPerNode[pool.MachinePoolConfig.Quantity] = finalRoleCommand - } - - if clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - err := clusters.CreateRancherBaselinePSACT(client, clustersConfig.PSACT) - if err != nil { - return nil, err - } - } - - clusterName := namegen.AppendRandomString(rke2k3sAirgapCustomCluster) - - cluster := clusters.NewK3SRKE2ClusterConfig(clusterName, namespace, clustersConfig, nil, "") - - clusterResp, err := clusters.CreateK3SRKE2Cluster(client, cluster) - if err != nil { - return nil, err - } - - client, err = client.ReLogin() - if err != nil { - return nil, err - } - - customCluster, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(clusterResp.ID) - if err != nil { - return nil, err - } - - clusterStatus := &apiv1.ClusterStatus{} - err = v1.ConvertToK8sType(customCluster.Status, clusterStatus) - if err != nil { - return nil, err - } - - token, err := tokenregistration.GetRegistrationToken(client, clusterStatus.ClusterName) - if err != nil { - return nil, err - } - - logrus.Infof("Register Custom Cluster Through Corral") - for quantity, roles := range rolesPerNode { - err = corral.UpdateCorralConfig("node_count", fmt.Sprint(quantity)) - if err != nil { - return nil, err - } - - command := fmt.Sprintf("%s %s", token.InsecureNodeCommand, roles) - 
logrus.Infof("registration command is %s", command) - err = corral.UpdateCorralConfig("registration_command", command) - if err != nil { - return nil, err - } - - corralName := namegen.AppendRandomString(rke2k3sNodeCorralName) - _, err = corral.CreateCorral( - client.Session, - corralName, - corralPackages.CorralPackageImages[corralPackageAirgapCustomClusterName], - corralPackages.HasDebug, - corralPackages.HasCleanup, - ) - if err != nil { - return nil, err - } - } - - createdCluster, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(namespace + "/" + clusterName) - return createdCluster, err -} - -// CreateProvisioningRKE1AirgapCustomCluster provisions an rke1 cluster using corral to gather its nodes, then runs verify checks -func CreateProvisioningRKE1AirgapCustomCluster(client *rancher.Client, clustersConfig *clusters.ClusterConfig, corralPackages *corral.Packages) (*management.Cluster, error) { - setLogrusFormatter() - clusterName := namegen.AppendRandomString(rke1AirgapCustomCluster) - rolesPerNode := map[int64]string{} - for _, pool := range clustersConfig.NodePools { - var finalRoleCommand string - if pool.NodeRoles.ControlPlane { - finalRoleCommand += " --controlplane" - } - if pool.NodeRoles.Etcd { - finalRoleCommand += " --etcd" - } - if pool.NodeRoles.Worker { - finalRoleCommand += " --worker" - } - - rolesPerNode[pool.NodeRoles.Quantity] = finalRoleCommand - } - - if clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - err := clusters.CreateRancherBaselinePSACT(client, clustersConfig.PSACT) - if err != nil { - return nil, err - } - } - - cluster := clusters.NewRKE1ClusterConfig(clusterName, client, clustersConfig) - clusterResp, err := clusters.CreateRKE1Cluster(client, cluster) - if err != nil { - return nil, err - } - - client, err = client.ReLogin() - if err != nil { - return nil, err - } - - customCluster, err := client.Management.Cluster.ByID(clusterResp.ID) - if err != nil { - return nil, err - } - - token, err := tokenregistration.GetRegistrationToken(client, customCluster.ID) - if err != nil { - return nil, err - } - - logrus.Infof("Register Custom Cluster Through Corral") - for quantity, roles := range rolesPerNode { - err = corral.UpdateCorralConfig("node_count", fmt.Sprint(quantity)) - if err != nil { - return nil, err - } - - command := fmt.Sprintf("%s %s", token.NodeCommand, roles) - logrus.Infof("registration command is %s", command) - err = corral.UpdateCorralConfig("registration_command", command) - if err != nil { - return nil, err - } - - corralName := namegen.AppendRandomString(rke1NodeCorralName) - - _, err = corral.CreateCorral( - client.Session, - corralName, - corralPackages.CorralPackageImages[corralPackageAirgapCustomClusterName], - corralPackages.HasDebug, - corralPackages.HasCleanup, - ) - if err != nil { - return nil, err - } - } - createdCluster, err := client.Management.Cluster.ByID(clusterResp.ID) - return createdCluster, err -} - -// CreateProvisioningAKSHostedCluster provisions an AKS cluster, then runs verify checks -func CreateProvisioningAKSHostedCluster(client *rancher.Client, aksClusterConfig aks.ClusterConfig) (*management.Cluster, error) { - cloudCredential, err := azure.CreateAzureCloudCredentials(client) - if err != nil { - return nil, err - } - - clusterName := namegen.AppendRandomString("akshostcluster") - clusterResp, err := aks.CreateAKSHostedCluster(client, clusterName, cloudCredential.ID, aksClusterConfig, false, false, false, false, nil) - if err != nil { - return nil, err - } - - if 
client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - client, err = client.ReLogin() - if err != nil { - return nil, err - } - - return client.Management.Cluster.ByID(clusterResp.ID) -} - -// CreateProvisioningEKSHostedCluster provisions an EKS cluster, then runs verify checks -func CreateProvisioningEKSHostedCluster(client *rancher.Client, eksClusterConfig eks.ClusterConfig) (*management.Cluster, error) { - cloudCredential, err := aws.CreateAWSCloudCredentials(client) - if err != nil { - return nil, err - } - - clusterName := namegen.AppendRandomString("ekshostcluster") - clusterResp, err := eks.CreateEKSHostedCluster(client, clusterName, cloudCredential.ID, eksClusterConfig, false, false, false, false, nil) - if err != nil { - return nil, err - } - - if client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - client, err = client.ReLogin() - if err != nil { - return nil, err - } - - return client.Management.Cluster.ByID(clusterResp.ID) -} - -// CreateProvisioningGKEHostedCluster provisions an GKE cluster, then runs verify checks -func CreateProvisioningGKEHostedCluster(client *rancher.Client, gkeClusterConfig gke.ClusterConfig) (*management.Cluster, error) { - cloudCredential, err := google.CreateGoogleCloudCredentials(client) - if err != nil { - return nil, err - } - - clusterName := namegen.AppendRandomString("gkehostcluster") - clusterResp, err := gke.CreateGKEHostedCluster(client, clusterName, cloudCredential.ID, gkeClusterConfig, false, false, false, false, nil) - if err != nil { - return nil, err - } - - if client.Flags.GetValue(environmentflag.UpdateClusterName) { - pipeline.UpdateConfigClusterName(clusterName) - } - - client, err = client.ReLogin() - if err != nil { - return nil, err - } - - return client.Management.Cluster.ByID(clusterResp.ID) -} - -func setLogrusFormatter() { - formatter := &logrus.TextFormatter{} - formatter.DisableQuote = true - logrus.SetFormatter(formatter) -} - -// createRKE1RegistrationCommand is a helper for rke1 custom clusters to create the registration command with advanced options configured per node -func createRKE1RegistrationCommand(command, publicIP, privateIP string, nodePool provisioninginput.NodePools) string { - if nodePool.SpecifyCustomPublicIP { - command += fmt.Sprintf(" --address %s", publicIP) - } - if nodePool.SpecifyCustomPrivateIP { - command += fmt.Sprintf(" --internal-address %s", privateIP) - } - if nodePool.CustomNodeNameSuffix != "" { - command += fmt.Sprintf(" --node-name %s", namegen.AppendRandomString(nodePool.CustomNodeNameSuffix)) - } - for labelKey, labelValue := range nodePool.NodeLabels { - command += fmt.Sprintf(" --label %s=%s", labelKey, labelValue) - } - for _, taint := range nodePool.NodeTaints { - command += fmt.Sprintf(" --taints %s=%s:%s", taint.Key, taint.Value, taint.Effect) - } - return command -} - -// createRegistrationCommand is a helper for rke2/k3s custom clusters to create the registration command with advanced options configured per node -func createRegistrationCommand(command, publicIP, privateIP string, machinePool provisioninginput.MachinePools) string { - if machinePool.SpecifyCustomPublicIP { - command += fmt.Sprintf(" --address %s", publicIP) - } - if machinePool.SpecifyCustomPrivateIP { - command += fmt.Sprintf(" --internal-address %s", privateIP) - } - if machinePool.CustomNodeNameSuffix != "" { - command += fmt.Sprintf(" --node-name %s", 
namegen.AppendRandomString(machinePool.CustomNodeNameSuffix)) - } - for labelKey, labelValue := range machinePool.NodeLabels { - command += fmt.Sprintf(" --label %s=%s", labelKey, labelValue) - } - for _, taint := range machinePool.NodeTaints { - command += fmt.Sprintf(" --taints %s=%s:%s", taint.Key, taint.Value, taint.Effect) - } - return command -} - -// createWindowsRegistrationCommand is a helper for rke2 windows custom clusters to create the registration command with advanced options configured per node -func createWindowsRegistrationCommand(command, publicIP, privateIP string, machinePool provisioninginput.MachinePools) string { - if machinePool.SpecifyCustomPublicIP { - command += fmt.Sprintf(" -Address '%s'", publicIP) - } - if machinePool.SpecifyCustomPrivateIP { - command += fmt.Sprintf(" -InternalAddress '%s'", privateIP) - } - if machinePool.CustomNodeNameSuffix != "" { - command += fmt.Sprintf(" -NodeName '%s'", namegen.AppendRandomString(machinePool.CustomNodeNameSuffix)) - } - // powershell requires only 1 flag per command, so we need to append the custom labels and taints together which is different from linux - if len(machinePool.NodeLabels) > 0 { - // there is an existing label for all windows nodes, so we need to insert the custom labels after the existing label - labelIndex := strings.Index(command, " -Label '") + len(" -Label '") - customLabels := "" - for labelKey, labelValue := range machinePool.NodeLabels { - customLabels += fmt.Sprintf("%s=%s,", labelKey, labelValue) - } - command = command[:labelIndex] + customLabels + command[labelIndex:] - } - if len(machinePool.NodeTaints) > 0 { - var customTaints string - for _, taint := range machinePool.NodeTaints { - customTaints += fmt.Sprintf("%s=%s:%s,", taint.Key, taint.Value, taint.Effect) - } - wrappedTaints := fmt.Sprintf(" -Taint '%s'", customTaints) - command += wrappedTaints - } - return command -} - -// AddRKE2K3SCustomClusterNodes is a helper method that will add nodes to the custom RKE2/K3S custom cluster. 
-func AddRKE2K3SCustomClusterNodes(client *rancher.Client, cluster *v1.SteveAPIObject, nodes []*nodes.Node, rolesPerNode []string) error { - clusterStatus := &apiv1.ClusterStatus{} - err := v1.ConvertToK8sType(cluster.Status, clusterStatus) - if err != nil { - return err - } - - token, err := tokenregistration.GetRegistrationToken(client, clusterStatus.ClusterName) - if err != nil { - return err - } - - var command string - for key, node := range nodes { - logrus.Infof("Adding node %s to cluster %s", node.NodeID, cluster.Name) - if strings.Contains(rolesPerNode[key], "windows") { - command = fmt.Sprintf("powershell.exe %s -Address %s", token.InsecureWindowsNodeCommand, node.PublicIPAddress) - } else { - command = fmt.Sprintf("%s %s --address %s", token.InsecureNodeCommand, rolesPerNode[key], node.PublicIPAddress) - } - - output, err := node.ExecuteCommand(command) - if err != nil { - return err - } - - logrus.Infof(output) - } - - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - clusterResp, err := client.Steve.SteveType(clusters.ProvisioningSteveResourceType).ByID(cluster.ID) - if err != nil { - return false, err - } - - if clusterResp.ObjectMeta.State.Name == active && - nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) == nil { - return true, nil - } - return false, nil - }) - if err != nil { - return err - } - - return nil -} - -// DeleteRKE2K3SCustomClusterNodes is a method that will delete nodes from the custom RKE2/K3S custom cluster. -func DeleteRKE2K3SCustomClusterNodes(client *rancher.Client, clusterID string, cluster *v1.SteveAPIObject, nodesToDelete []*nodes.Node) error { - steveclient, err := client.Steve.ProxyDownstream(clusterID) - if err != nil { - return err - } - - nodesSteveObjList, err := steveclient.SteveType("node").List(nil) - if err != nil { - return err - } - - for _, nodeToDelete := range nodesToDelete { - for _, node := range nodesSteveObjList.Data { - snippedIP := strings.Split(node.Annotations[internalIP], ",")[0] - - if snippedIP == nodeToDelete.PrivateIPAddress { - machine, err := client.Steve.SteveType(machineSteveResourceType).ByID(namespace + "/" + node.Annotations[machineNameAnnotation]) - if err != nil { - return err - } - - logrus.Infof("Deleting node %s from cluster %s", nodeToDelete.NodeID, cluster.Name) - err = client.Steve.SteveType(machineSteveResourceType).Delete(machine) - if err != nil { - return err - } - - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - _, err = client.Steve.SteveType(machineSteveResourceType).ByID(machine.ID) - if err != nil { - logrus.Infof("Node has successfully been deleted!") - return true, nil - } - return false, nil - }) - if err != nil { - return err - } - } - } - } - - return nil -} - -// AddRKE1CustomClusterNodes is a method that will add nodes to the custom RKE1 custom cluster. 
-func AddRKE1CustomClusterNodes(client *rancher.Client, cluster *management.Cluster, nodes []*nodes.Node, rolesPerNode []string) error { - token, err := tokenregistration.GetRegistrationToken(client, cluster.ID) - if err != nil { - return err - } - - var command string - for key, node := range nodes { - logrus.Infof("Adding node %s to cluster %s", node.NodeID, cluster.Name) - command = fmt.Sprintf("%s %s --address %s", token.NodeCommand, rolesPerNode[key], node.PublicIPAddress) - - output, err := node.ExecuteCommand(command) - if err != nil { - return err - } - - logrus.Infof(output) - } - - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - client, err = client.ReLogin() - if err != nil { - return false, err - } - - clusterResp, err := client.Management.Cluster.ByID(cluster.ID) - if err != nil { - return false, err - } - - if clusterResp.State == active && - nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) == nil { - return true, nil - } - return false, nil - }) - if err != nil { - return err - } - - return nil -} - -// DeleteRKE1CustomClusterNodes is a helper method that will delete nodes from the custom RKE1 custom cluster. -func DeleteRKE1CustomClusterNodes(client *rancher.Client, cluster *management.Cluster, nodesToDelete []*nodes.Node) error { - nodes, err := client.Management.Node.ListAll(&types.ListOpts{Filters: map[string]interface{}{ - "clusterId": cluster.ID, - }}) - if err != nil { - return err - } - - for _, nodeToDelete := range nodesToDelete { - for _, node := range nodes.Data { - if node.Annotations[rke1ExternalIP] == nodeToDelete.PublicIPAddress { - machine, err := client.Management.Node.ByID(node.ID) - if err != nil { - return err - } - - logrus.Infof("Deleting node %s from cluster %s", nodeToDelete.NodeID, cluster.Name) - err = client.Management.Node.Delete(machine) - if err != nil { - return err - } - - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - _, err = client.Management.Node.ByID(machine.ID) - if err != nil { - logrus.Infof("Node has successfully been deleted!") - return true, nil - } - return false, nil - }) - if err != nil { - return err - } - } - } - } - - return nil -} diff --git a/extensions/provisioning/customcluster.go b/extensions/provisioning/customcluster.go deleted file mode 100644 index e2b37ee8..00000000 --- a/extensions/provisioning/customcluster.go +++ /dev/null @@ -1,14 +0,0 @@ -package provisioning - -import ( - corev1 "k8s.io/api/core/v1" -) - -type CustomClusterConfig struct { - ExternalNodeProvider ExternalNodeProvider `json:"externalNodeProvider" yaml:"externalNodeProvider"` - NodeLabels map[string]string `json:"nodeLabels" yaml:"nodeLabels"` - NodeTaints []corev1.Taint `json:"nodeTaints" yaml:"nodeTaints"` - SpecifyPrivateIP bool `json:"specifyPrivateIP" yaml:"specifyPrivateIP"` - SpecifyPublicIP bool `json:"specifyPublicIP" yaml:"specifyPublicIP"` - NodeNamePrefix string `json:"nodeNamePrefix" yaml:"nodeNamePrefix"` -} diff --git a/extensions/provisioning/nodeproviders.go b/extensions/provisioning/nodeproviders.go deleted file mode 100644 index 1db76fbd..00000000 --- a/extensions/provisioning/nodeproviders.go +++ /dev/null @@ -1,63 +0,0 @@ -package provisioning - -import ( - "fmt" - - "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/nodes/ec2" - 
"github.com/rancher/shepherd/pkg/config" - "github.com/rancher/shepherd/pkg/nodes" -) - -const ( - ec2NodeProviderName = "ec2" - fromConfig = "config" -) - -type NodeCreationFunc func(client *rancher.Client, rolesPerPool []string, quantityPerPool []int32) (nodes []*nodes.Node, err error) -type NodeDeletionFunc func(client *rancher.Client, nodes []*nodes.Node) error - -type ExternalNodeProvider struct { - Name string - NodeCreationFunc NodeCreationFunc - NodeDeletionFunc NodeDeletionFunc -} - -// ExternalNodeProviderSetup is a helper function that setups an ExternalNodeProvider object is a wrapper -// for the specific outside node provider node creator function -func ExternalNodeProviderSetup(providerType string) ExternalNodeProvider { - switch providerType { - case ec2NodeProviderName: - return ExternalNodeProvider{ - Name: providerType, - NodeCreationFunc: ec2.CreateNodes, - NodeDeletionFunc: ec2.DeleteNodes, - } - case fromConfig: - return ExternalNodeProvider{ - Name: providerType, - NodeCreationFunc: func(client *rancher.Client, rolesPerPool []string, quantityPerPool []int32) (nodesList []*nodes.Node, err error) { - var nodeConfig nodes.ExternalNodeConfig - config.LoadConfig(nodes.ExternalNodeConfigConfigurationFileKey, &nodeConfig) - - nodesList = nodeConfig.Nodes[-1] - - for _, node := range nodesList { - sshKey, err := nodes.GetSSHKey(node.SSHKeyName) - if err != nil { - return nil, err - } - - node.SSHKey = sshKey - } - return nodesList, nil - }, - NodeDeletionFunc: func(client *rancher.Client, nodes []*nodes.Node) error { - return ec2.DeleteNodes(client, nodes) - }, - } - default: - panic(fmt.Sprintf("Node Provider:%v not found", providerType)) - } - -} diff --git a/extensions/provisioning/providers.go b/extensions/provisioning/providers.go deleted file mode 100644 index d921b2a7..00000000 --- a/extensions/provisioning/providers.go +++ /dev/null @@ -1,146 +0,0 @@ -package provisioning - -import ( - "fmt" - - "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/cloudcredentials" - "github.com/rancher/shepherd/extensions/cloudcredentials/aws" - "github.com/rancher/shepherd/extensions/cloudcredentials/azure" - "github.com/rancher/shepherd/extensions/cloudcredentials/digitalocean" - "github.com/rancher/shepherd/extensions/cloudcredentials/harvester" - "github.com/rancher/shepherd/extensions/cloudcredentials/linode" - "github.com/rancher/shepherd/extensions/cloudcredentials/vsphere" - "github.com/rancher/shepherd/extensions/machinepools" - "github.com/rancher/shepherd/extensions/provisioninginput" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - "github.com/rancher/shepherd/extensions/rke1/nodetemplates" - r1aws "github.com/rancher/shepherd/extensions/rke1/nodetemplates/aws" - r1azure "github.com/rancher/shepherd/extensions/rke1/nodetemplates/azure" - r1harvester "github.com/rancher/shepherd/extensions/rke1/nodetemplates/harvester" - r1linode "github.com/rancher/shepherd/extensions/rke1/nodetemplates/linode" - r1vsphere "github.com/rancher/shepherd/extensions/rke1/nodetemplates/vsphere" -) - -type CloudCredFunc func(rancherClient *rancher.Client) (*cloudcredentials.CloudCredential, error) -type MachinePoolFunc func(generatedPoolName, namespace string) []unstructured.Unstructured - -type Provider struct { - Name provisioninginput.ProviderName - MachineConfigPoolResourceSteveType string - MachinePoolFunc MachinePoolFunc - CloudCredFunc CloudCredFunc - Roles []machinepools.Roles -} - -// CreateProvider returns all machine and cloud credential 
-// configs in the form of a Provider struct. Accepts a -// string of the name of the provider. -func CreateProvider(name string) Provider { - switch { - case name == provisioninginput.AWSProviderName.String(): - provider := Provider{ - Name: provisioninginput.AWSProviderName, - MachineConfigPoolResourceSteveType: machinepools.AWSPoolType, - MachinePoolFunc: machinepools.NewAWSMachineConfig, - CloudCredFunc: aws.CreateAWSCloudCredentials, - Roles: machinepools.GetAWSMachineRoles(), - } - return provider - case name == provisioninginput.AzureProviderName.String(): - provider := Provider{ - Name: provisioninginput.AzureProviderName, - MachineConfigPoolResourceSteveType: machinepools.AzurePoolType, - MachinePoolFunc: machinepools.NewAzureMachineConfig, - CloudCredFunc: azure.CreateAzureCloudCredentials, - Roles: machinepools.GetAzureMachineRoles(), - } - return provider - case name == provisioninginput.DOProviderName.String(): - provider := Provider{ - Name: provisioninginput.DOProviderName, - MachineConfigPoolResourceSteveType: machinepools.DOPoolType, - MachinePoolFunc: machinepools.NewDigitalOceanMachineConfig, - CloudCredFunc: digitalocean.CreateDigitalOceanCloudCredentials, - Roles: machinepools.GetDOMachineRoles(), - } - return provider - case name == provisioninginput.LinodeProviderName.String(): - provider := Provider{ - Name: provisioninginput.LinodeProviderName, - MachineConfigPoolResourceSteveType: machinepools.LinodePoolType, - MachinePoolFunc: machinepools.NewLinodeMachineConfig, - CloudCredFunc: linode.CreateLinodeCloudCredentials, - Roles: machinepools.GetLinodeMachineRoles(), - } - return provider - case name == provisioninginput.HarvesterProviderName.String(): - provider := Provider{ - Name: provisioninginput.HarvesterProviderName, - MachineConfigPoolResourceSteveType: machinepools.HarvesterPoolType, - MachinePoolFunc: machinepools.NewHarvesterMachineConfig, - CloudCredFunc: harvester.CreateHarvesterCloudCredentials, - Roles: machinepools.GetHarvesterMachineRoles(), - } - return provider - case name == provisioninginput.VsphereProviderName.String(): - provider := Provider{ - Name: provisioninginput.VsphereProviderName, - MachineConfigPoolResourceSteveType: machinepools.VmwarevsphereType, - MachinePoolFunc: machinepools.NewVSphereMachineConfig, - CloudCredFunc: vsphere.CreateVsphereCloudCredentials, - Roles: machinepools.GetVsphereMachineRoles(), - } - return provider - default: - panic(fmt.Sprintf("Provider:%v not found", name)) - } -} - -type NodeTemplateFunc func(rancherClient *rancher.Client) (*nodetemplates.NodeTemplate, error) - -type RKE1Provider struct { - Name provisioninginput.ProviderName - NodeTemplateFunc NodeTemplateFunc -} - -// CreateProvider returns all node template -// configs in the form of a RKE1Provider struct. Accepts a -// string of the name of the provider. 
-func CreateRKE1Provider(name string) RKE1Provider { - switch { - case name == provisioninginput.AWSProviderName.String(): - provider := RKE1Provider{ - Name: provisioninginput.AWSProviderName, - NodeTemplateFunc: r1aws.CreateAWSNodeTemplate, - } - return provider - case name == provisioninginput.AzureProviderName.String(): - provider := RKE1Provider{ - Name: provisioninginput.AzureProviderName, - NodeTemplateFunc: r1azure.CreateAzureNodeTemplate, - } - return provider - case name == provisioninginput.HarvesterProviderName.String(): - provider := RKE1Provider{ - Name: provisioninginput.HarvesterProviderName, - NodeTemplateFunc: r1harvester.CreateHarvesterNodeTemplate, - } - return provider - case name == provisioninginput.LinodeProviderName.String(): - provider := RKE1Provider{ - Name: provisioninginput.LinodeProviderName, - NodeTemplateFunc: r1linode.CreateLinodeNodeTemplate, - } - return provider - case name == provisioninginput.VsphereProviderName.String(): - provider := RKE1Provider{ - Name: provisioninginput.VsphereProviderName, - NodeTemplateFunc: r1vsphere.CreateVSphereNodeTemplate, - } - return provider - default: - panic(fmt.Sprintf("RKE1Provider:%v not found", name)) - } -} diff --git a/extensions/provisioning/ssh.go b/extensions/provisioning/ssh.go deleted file mode 100644 index 12b09406..00000000 --- a/extensions/provisioning/ssh.go +++ /dev/null @@ -1,89 +0,0 @@ -package provisioning - -// This file contains all tests that require to ssh into a node to run commands to check things -// such as any stats, benchmarks, etc. For example, ssh is required to check the cpu usage of a -// process running on an individual node. - -import ( - "errors" - "strconv" - "strings" - - "time" - - "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/defaults" - extnodes "github.com/rancher/shepherd/extensions/nodes" - "github.com/rancher/shepherd/extensions/provisioninginput" - "github.com/rancher/shepherd/pkg/nodes" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - cpuUsageVar = 100 // 100 is just a placeholder until we can determine an actual number. Even with cpu usage spiking it should not go past 100% cpu usage and previous issues concerning this were hitting around 130% and above - checkCPU provisioninginput.SSHTestCase = "CheckCPU" - checkCPUCommand = "ps -C agent -o %cpu --no-header" - nodeReboot provisioninginput.SSHTestCase = "NodeReboot" - activeState = "active" - runningState = "running" - fleetNamespace = "fleet-default" -) - -// CallSSHTestByName tests the ssh tests specified in the provisioninginput config clusterSSHTests field. -// For example CheckCPU checks the cpu usage of the cluster agent. If the usage is too high the func will return a warning. 
-func CallSSHTestByName(testCase provisioninginput.SSHTestCase, node *nodes.Node, client *rancher.Client, clusterID string, machineName string) error { - switch testCase { - case checkCPU: - logrus.Infof("Running CheckCPU test on node %s", node.PublicIPAddress) - output, err := node.ExecuteCommand(checkCPUCommand) - if err != nil { - return err - } - strOutput := output[:strings.IndexByte(output, '\n')] - logrus.Info("CheckCPU test on node " + node.PublicIPAddress + " | Cluster agent cpu usage is: " + strOutput + "%") - - outputInt, err := strconv.ParseFloat(strings.TrimSpace(strOutput), 32) - if outputInt > cpuUsageVar { - logrus.Warn("Cluster agent cpu usage is too high on node" + node.PublicIPAddress + " | Current cpu usage is: " + strOutput + "%") - } - if err != nil { - return err - } - case nodeReboot: - logrus.Infof("Running NodeReboot test on node %s", node.PublicIPAddress) - command := "sudo reboot" - _, err := node.ExecuteCommand(command) - if err != nil && !errors.Is(err, &ssh.ExitMissingError{}) { - return err - } - // Verify machine shuts down within five minutes, shutting down should not take longer than that depending on the ami - err = wait.Poll(1*time.Second, defaults.FiveMinuteTimeout, func() (bool, error) { - newNode, err := client.Steve.SteveType(machineSteveResourceType).ByID(fleetNamespace + "/" + machineName) - if err != nil { - return false, err - } - if newNode.State.Name == runningState { - return false, nil - } - return true, nil - }) - if err != nil { - logrus.Errorf("Node %s was unable to reboot successfully | Cluster %s is still in active state", node.PublicIPAddress, clusterID) - return err - } - - err = extnodes.AllMachineReady(client, clusterID, defaults.TenMinuteTimeout) - if err != nil { - logrus.Errorf("Node %s failed to reboot successfully", node.PublicIPAddress) - return err - } - - return err - default: - err := errors.New("Invalid SSH test: " + string(testCase) + " is spelled incorrectly or does not exist.") - return err - } - return nil -} diff --git a/extensions/provisioning/upgrade.go b/extensions/provisioning/upgrade.go deleted file mode 100644 index 0259f9ad..00000000 --- a/extensions/provisioning/upgrade.go +++ /dev/null @@ -1,41 +0,0 @@ -package provisioning - -import ( - "fmt" - - "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/bundledclusters" -) - -// UpgradeClusterK8sVersion upgrades the cluster to the specified version -func UpgradeClusterK8sVersion(client *rancher.Client, clusterName *string, upgradeVersion *string) (*bundledclusters.BundledCluster, error) { - clusterMeta, err := clusters.NewClusterMeta(client, *clusterName) - if err != nil { - return nil, err - } - if clusterMeta == nil { - return nil, fmt.Errorf("cluster %s not found", *clusterName) - } - - initCluster, err := bundledclusters.NewWithClusterMeta(clusterMeta) - if err != nil { - return nil, err - } - - cluster, err := initCluster.Get(client) - if err != nil { - return nil, err - } - - updatedCluster, err := cluster.UpdateKubernetesVersion(client, upgradeVersion) - if err != nil { - return nil, err - } - - err = clusters.WaitClusterToBeUpgraded(client, clusterMeta.ID) - if err != nil { - return nil, err - } - return updatedCluster, nil -} diff --git a/extensions/provisioning/verify.go b/extensions/provisioning/verify.go deleted file mode 100644 index d9e347c7..00000000 --- a/extensions/provisioning/verify.go +++ /dev/null @@ -1,491 +0,0 @@ -package provisioning - -import ( 
- "context" - "fmt" - "net/url" - "strings" - "testing" - "time" - - provv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" - rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" - "github.com/rancher/shepherd/clients/rancher" - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - steveV1 "github.com/rancher/shepherd/clients/rancher/v1" - "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/extensions/clusters/bundledclusters" - "github.com/rancher/shepherd/extensions/defaults" - "github.com/rancher/shepherd/extensions/defaults/stevetypes" - "github.com/rancher/shepherd/extensions/etcdsnapshot" - "github.com/rancher/shepherd/extensions/kubeconfig" - nodestat "github.com/rancher/shepherd/extensions/nodes" - "github.com/rancher/shepherd/extensions/provisioninginput" - psadeploy "github.com/rancher/shepherd/extensions/psact" - "github.com/rancher/shepherd/extensions/registries" - "github.com/rancher/shepherd/extensions/sshkeys" - "github.com/rancher/shepherd/extensions/workloads/pods" - "github.com/rancher/shepherd/pkg/wait" - wranglername "github.com/rancher/wrangler/pkg/name" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - kwait "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - capi "sigs.k8s.io/cluster-api/api/v1beta1" -) - -const ( - local = "local" - logMessageKubernetesVersion = "Validating the current version is the upgraded one" - hostnameLimit = 63 - etcdSnapshotAnnotation = "etcdsnapshot.rke.io/storage" - machineNameAnnotation = "cluster.x-k8s.io/machine" - machineSteveResourceType = "cluster.x-k8s.io.machine" - onDemandPrefix = "on-demand-" - s3 = "s3" -) - -// VerifyRKE1Cluster validates that the RKE1 cluster and its resources are in a good state, matching a given config. 
-func VerifyRKE1Cluster(t *testing.T, client *rancher.Client, clustersConfig *clusters.ClusterConfig, cluster *management.Cluster) { - client, err := client.ReLogin() - require.NoError(t, err) - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - require.NoError(t, err) - - watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ - FieldSelector: "metadata.name=" + cluster.ID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - require.NoError(t, err) - - checkFunc := clusters.IsHostedProvisioningClusterReady - err = wait.WatchWait(watchInterface, checkFunc) - require.NoError(t, err) - - assert.Equal(t, clustersConfig.KubernetesVersion, cluster.RancherKubernetesEngineConfig.Version) - - clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name) - require.NoError(t, err) - assert.NotEmpty(t, clusterToken) - - err = nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) - require.NoError(t, err) - - if clustersConfig.PSACT == string(provisioninginput.RancherPrivileged) || clustersConfig.PSACT == string(provisioninginput.RancherRestricted) || clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { - require.NotEmpty(t, cluster.DefaultPodSecurityAdmissionConfigurationTemplateName) - - err := psadeploy.CreateNginxDeployment(client, cluster.ID, clustersConfig.PSACT) - require.NoError(t, err) - } - if clustersConfig.Registries != nil { - if clustersConfig.Registries.RKE1Registries != nil { - for _, registry := range clustersConfig.Registries.RKE1Registries { - havePrefix, err := registries.CheckAllClusterPodsForRegistryPrefix(client, cluster.ID, registry.URL) - require.NoError(t, err) - assert.True(t, havePrefix) - } - } - } - if clustersConfig.Networking != nil { - if clustersConfig.Networking.LocalClusterAuthEndpoint != nil { - VerifyACE(t, adminClient, cluster) - } - } - - if clustersConfig.CloudProvider == "" { - podErrors := pods.StatusPods(client, cluster.ID) - assert.Empty(t, podErrors) - } -} - -// VerifyCluster validates that a non-rke1 cluster and its resources are in a good state, matching a given config. 
-func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *clusters.ClusterConfig, cluster *steveV1.SteveAPIObject) { - client, err := client.ReLogin() - require.NoError(t, err) - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - require.NoError(t, err) - - kubeProvisioningClient, err := adminClient.GetKubeAPIProvisioningClient() - require.NoError(t, err) - - watchInterface, err := kubeProvisioningClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ - FieldSelector: "metadata.name=" + cluster.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - require.NoError(t, err) - - checkFunc := clusters.IsProvisioningClusterReady - err = wait.WatchWait(watchInterface, checkFunc) - require.NoError(t, err) - - clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name) - require.NoError(t, err) - assert.NotEmpty(t, clusterToken) - - err = nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) - require.NoError(t, err) - - status := &provv1.ClusterStatus{} - err = steveV1.ConvertToK8sType(cluster.Status, status) - require.NoError(t, err) - - clusterSpec := &provv1.ClusterSpec{} - err = steveV1.ConvertToK8sType(cluster.Spec, clusterSpec) - require.NoError(t, err) - - configKubeVersion := clusterSpec.KubernetesVersion - require.Equal(t, configKubeVersion, clusterSpec.KubernetesVersion) - - if clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName == string(provisioninginput.RancherPrivileged) || - clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName == string(provisioninginput.RancherRestricted) || - clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName == string(provisioninginput.RancherBaseline) { - - require.NotEmpty(t, clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName) - - err := psadeploy.CreateNginxDeployment(client, status.ClusterName, clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName) - require.NoError(t, err) - } - - if clusterSpec.RKEConfig.Registries != nil { - for registryName := range clusterSpec.RKEConfig.Registries.Configs { - havePrefix, err := registries.CheckAllClusterPodsForRegistryPrefix(client, status.ClusterName, registryName) - require.NoError(t, err) - assert.True(t, havePrefix) - } - } - - if clusterSpec.LocalClusterAuthEndpoint.Enabled { - mgmtClusterObject, err := adminClient.Management.Cluster.ByID(status.ClusterName) - require.NoError(t, err) - VerifyACE(t, adminClient, mgmtClusterObject) - } - - podErrors := pods.StatusPods(client, status.ClusterName) - assert.Empty(t, podErrors) - - if clustersConfig != nil { - if clustersConfig.ClusterSSHTests != nil { - VerifySSHTests(t, client, cluster, clustersConfig.ClusterSSHTests, status.ClusterName) - } - } -} - -// VerifyHostedCluster validates that the hosted cluster and its resources are in a good state, matching a given config. 
-func VerifyHostedCluster(t *testing.T, client *rancher.Client, cluster *management.Cluster) { - client, err := client.ReLogin() - require.NoError(t, err) - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - require.NoError(t, err) - - watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ - FieldSelector: "metadata.name=" + cluster.ID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - require.NoError(t, err) - - checkFunc := clusters.IsHostedProvisioningClusterReady - - err = wait.WatchWait(watchInterface, checkFunc) - require.NoError(t, err) - - clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name) - require.NoError(t, err) - assert.NotEmpty(t, clusterToken) - - err = nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) - require.NoError(t, err) - - podErrors := pods.StatusPods(client, cluster.ID) - assert.Empty(t, podErrors) -} - -// VerifyDeleteRKE1Cluster validates that a rke1 cluster and its resources are deleted. -func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID string) { - cluster, err := client.Management.Cluster.ByID(clusterID) - require.NoError(t, err) - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - require.NoError(t, err) - - watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ - FieldSelector: "metadata.name=" + clusterID, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - require.NoError(t, err) - - err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { - if event.Type == watch.Error { - return false, fmt.Errorf("error: unable to delete cluster %s", cluster.Name) - } else if event.Type == watch.Deleted { - logrus.Infof("Cluster %s deleted!", cluster.Name) - return true, nil - } - return false, nil - }) - require.NoError(t, err) - - err = nodestat.AllNodeDeleted(client, clusterID) - require.NoError(t, err) -} - -// VerifyDeleteRKE2K3SCluster validates that a non-rke1 cluster and its resources are deleted. 
-func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID string) { - cluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(clusterID) - require.NoError(t, err) - - adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) - require.NoError(t, err) - - provKubeClient, err := adminClient.GetKubeAPIProvisioningClient() - require.NoError(t, err) - - watchInterface, err := provKubeClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ - FieldSelector: "metadata.name=" + cluster.Name, - TimeoutSeconds: &defaults.WatchTimeoutSeconds, - }) - require.NoError(t, err) - - err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { - cluster := event.Object.(*provv1.Cluster) - if event.Type == watch.Error { - return false, fmt.Errorf("error: unable to delete cluster %s", cluster.ObjectMeta.Name) - } else if event.Type == watch.Deleted { - logrus.Infof("Cluster %s deleted!", cluster.ObjectMeta.Name) - return true, nil - } else if cluster == nil { - logrus.Infof("Cluster %s deleted!", cluster.ObjectMeta.Name) - return true, nil - } - return false, nil - }) - require.NoError(t, err) - - err = nodestat.AllNodeDeleted(client, clusterID) - require.NoError(t, err) -} - -// CertRotationCompleteCheckFunc returns a watch check function that checks if the certificate rotation is complete -func CertRotationCompleteCheckFunc(generation int64) wait.WatchCheckFunc { - return func(event watch.Event) (bool, error) { - controlPlane := event.Object.(*rkev1.RKEControlPlane) - return controlPlane.Status.CertificateRotationGeneration == generation, nil - } -} - -// VerifyACE validates that the ACE resources are healthy in a given cluster -func VerifyACE(t *testing.T, client *rancher.Client, cluster *management.Cluster) { - client, err := client.ReLogin() - require.NoError(t, err) - - kubeConfig, err := kubeconfig.GetKubeconfig(client, cluster.ID) - require.NoError(t, err) - - original, err := client.SwitchContext(cluster.Name, kubeConfig) - require.NoError(t, err) - - originalResp, err := original.Resource(corev1.SchemeGroupVersion.WithResource("pods")).Namespace("").List(context.TODO(), metav1.ListOptions{}) - require.NoError(t, err) - for _, pod := range originalResp.Items { - t.Logf("Pod %v", pod.GetName()) - } - - // each control plane has a context. 
For ACE, we should check these contexts - contexts, err := kubeconfig.GetContexts(kubeConfig) - require.NoError(t, err) - var contextNames []string - for context := range contexts { - if strings.Contains(context, "pool") { - contextNames = append(contextNames, context) - } - } - - for _, contextName := range contextNames { - dynamic, err := client.SwitchContext(contextName, kubeConfig) - assert.NoError(t, err) - resp, err := dynamic.Resource(corev1.SchemeGroupVersion.WithResource("pods")).Namespace("").List(context.TODO(), metav1.ListOptions{}) - assert.NoError(t, err) - t.Logf("Switched Context to %v", contextName) - for _, pod := range resp.Items { - t.Logf("Pod %v", pod.GetName()) - } - } -} - -// VerifyHostnameLength validates that the hostnames of the nodes in a cluster are of the correct length -func VerifyHostnameLength(t *testing.T, client *rancher.Client, clusterObject *steveV1.SteveAPIObject) { - client, err := client.ReLogin() - require.NoError(t, err) - - clusterSpec := &provv1.ClusterSpec{} - err = steveV1.ConvertToK8sType(clusterObject.Spec, clusterSpec) - require.NoError(t, err) - - for _, mp := range clusterSpec.RKEConfig.MachinePools { - n := wranglername.SafeConcatName(clusterObject.Name, mp.Name) - query, err := url.ParseQuery(fmt.Sprintf("labelSelector=%s=%s&fieldSelector=metadata.name=%s", capi.ClusterNameLabel, clusterObject.Name, n)) - require.NoError(t, err) - - machineDeploymentsResp, err := client.Steve.SteveType("cluster.x-k8s.io.machinedeployment").List(query) - require.NoError(t, err) - - assert.True(t, len(machineDeploymentsResp.Data) == 1) - - md := &capi.MachineDeployment{} - require.NoError(t, steveV1.ConvertToK8sType(machineDeploymentsResp.Data[0].JSONResp, md)) - - query2, err := url.ParseQuery(fmt.Sprintf("labelSelector=%s=%s", capi.MachineDeploymentNameLabel, md.Name)) - require.NoError(t, err) - - machineResp, err := client.Steve.SteveType(machineSteveResourceType).List(query2) - require.NoError(t, err) - - assert.True(t, len(machineResp.Data) > 0) - - for i := range machineResp.Data { - m := capi.Machine{} - require.NoError(t, steveV1.ConvertToK8sType(machineResp.Data[i].JSONResp, &m)) - - assert.NotNil(t, m.Status.NodeRef) - - dynamic, err := client.GetRancherDynamicClient() - require.NoError(t, err) - - gv, err := schema.ParseGroupVersion(m.Spec.InfrastructureRef.APIVersion) - require.NoError(t, err) - - gvr := schema.GroupVersionResource{ - Group: gv.Group, - Version: gv.Version, - Resource: strings.ToLower(m.Spec.InfrastructureRef.Kind) + "s", - } - - ustr, err := dynamic.Resource(gvr).Namespace(m.Namespace).Get(context.TODO(), m.Spec.InfrastructureRef.Name, metav1.GetOptions{}) - require.NoError(t, err) - - limit := hostnameLimit - if mp.HostnameLengthLimit != 0 { - limit = mp.HostnameLengthLimit - } else if clusterSpec.RKEConfig.MachinePoolDefaults.HostnameLengthLimit != 0 { - limit = clusterSpec.RKEConfig.MachinePoolDefaults.HostnameLengthLimit - } - - assert.True(t, len(m.Status.NodeRef.Name) <= limit) - if len(ustr.GetName()) < limit { - assert.True(t, m.Status.NodeRef.Name == ustr.GetName()) - } - } - t.Logf("Verified hostname length for machine pool %s", mp.Name) - } -} - -// VerifyUpgrade validates that a cluster has been upgraded to a given version -func VerifyUpgrade(t *testing.T, updatedCluster *bundledclusters.BundledCluster, upgradedVersion string) { - if updatedCluster.V3 != nil { - assert.Equalf(t, upgradedVersion, updatedCluster.V3.RancherKubernetesEngineConfig.Version, "[%v]: %v", updatedCluster.Meta.Name, 
logMessageKubernetesVersion) - } else { - clusterSpec := &provv1.ClusterSpec{} - err := steveV1.ConvertToK8sType(updatedCluster.V1.Spec, clusterSpec) - require.NoError(t, err) - assert.Equalf(t, upgradedVersion, clusterSpec.KubernetesVersion, "[%v]: %v", updatedCluster.Meta.Name, logMessageKubernetesVersion) - } -} - -// VerifySnapshots waits for a cluster's snapshots to be ready and validates that the correct number of snapshots have been taken -func VerifySnapshots(client *rancher.Client, clusterName string, expectedSnapshotLength int, isRKE1 bool) (string, error) { - client, err := client.ReLogin() - if err != nil { - return "", err - } - - var snapshotToBeRestored string - var snapshotNameList []string - s3Prefix := onDemandPrefix + clusterName - err = kwait.PollUntilContextTimeout(context.TODO(), 5*time.Second, defaults.FiveMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - if isRKE1 { - snapshotObjectList, err := etcdsnapshot.GetRKE1Snapshots(client, clusterName) - if err != nil { - return false, err - } - - for _, snapshot := range snapshotObjectList { - snapshotNameList = append(snapshotNameList, snapshot.ID) - } - } else { - snapshotObjectList, err := etcdsnapshot.GetRKE2K3SSnapshots(client, clusterName) - if err != nil { - return false, err - } - - for _, snapshot := range snapshotObjectList { - snapshotNameList = append(snapshotNameList, snapshot.Name) - } - } - - if len(snapshotNameList) == 0 { - return false, fmt.Errorf("no snapshots found") - } - - if strings.Contains(fmt.Sprintf("%v", snapshotNameList), s3Prefix) { - snapshotSteveObjList, err := client.Steve.SteveType(stevetypes.EtcdSnapshot).List(nil) - if err != nil { - return false, err - } - - for _, snapshot := range snapshotSteveObjList.Data { - if snapshot.Annotations[etcdSnapshotAnnotation] == s3 { - snapshotToBeRestored = snapshot.Name - - return true, nil - } else if snapshot.Annotations[etcdSnapshotAnnotation] == local { - snapshotToBeRestored = snapshotNameList[len(snapshotNameList)-1] - - return true, nil - } - } - - return false, nil - } - - if len(snapshotNameList) == expectedSnapshotLength || len(snapshotNameList) > expectedSnapshotLength { - snapshotToBeRestored = snapshotNameList[0] - return true, nil - } - - return false, nil - }) - return snapshotToBeRestored, err -} - -// VerifySSHTests validates the ssh tests listed in the config on each node of the cluster -func VerifySSHTests(t *testing.T, client *rancher.Client, clusterObject *steveV1.SteveAPIObject, sshTests []provisioninginput.SSHTestCase, clusterID string) { - client, err := client.ReLogin() - require.NoError(t, err) - - steveClient, err := client.Steve.ProxyDownstream(clusterID) - require.NoError(t, err) - - nodesSteveObjList, err := steveClient.SteveType("node").List(nil) - require.NoError(t, err) - - sshUser, err := sshkeys.GetSSHUser(client, clusterObject) - require.NoError(t, err) - - for _, tests := range sshTests { - for _, machine := range nodesSteveObjList.Data { - clusterNode, err := sshkeys.GetSSHNodeFromMachine(client, sshUser, &machine) - require.NoError(t, err) - - machineName := machine.Annotations[machineNameAnnotation] - err = CallSSHTestByName(tests, clusterNode, client, clusterID, machineName) - require.NoError(t, err) - - } - } -} diff --git a/extensions/rke1/componentchecks/etcdversion.go b/extensions/rke1/componentchecks/etcdversion.go deleted file mode 100644 index 3c9cdfb0..00000000 --- a/extensions/rke1/componentchecks/etcdversion.go +++ /dev/null @@ -1,46 +0,0 @@ -package componentchecks - -import ( 
- "strings" - - "github.com/rancher/shepherd/clients/rancher" - "github.com/rancher/shepherd/pkg/nodes" - "github.com/sirupsen/logrus" -) - -// CheckETCDVersion will check the etcd version on the etcd node in the provisioned RKE1 cluster. -func CheckETCDVersion(client *rancher.Client, nodes []*nodes.Node, clusterID string) ([]string, error) { - steveClient, err := client.Steve.ProxyDownstream(clusterID) - if err != nil { - return nil, err - } - - nodesList, err := steveClient.SteveType("node").List(nil) - if err != nil { - return nil, err - } - - var etcdResult []string - - for _, rancherNode := range nodesList.Data { - externalIP := rancherNode.Annotations["rke.cattle.io/external-ip"] - etcdRole := rancherNode.Labels["node-role.kubernetes.io/etcd"] == "true" - - if etcdRole == true { - for _, node := range nodes { - if strings.Contains(node.PublicIPAddress, externalIP) { - command := "docker exec etcd etcdctl version" - output, err := node.ExecuteCommand(command) - if err != nil { - return []string{}, err - } - - etcdResult = append(etcdResult, output) - logrus.Infof(output) - } - } - } - } - - return etcdResult, nil -} diff --git a/extensions/rke1/nodepools/nodepools.go b/extensions/rke1/nodepools/nodepools.go deleted file mode 100644 index 94f2c2a6..00000000 --- a/extensions/rke1/nodepools/nodepools.go +++ /dev/null @@ -1,142 +0,0 @@ -package rke1 - -import ( - "context" - "strconv" - "time" - - "github.com/rancher/norman/types" - "github.com/rancher/shepherd/clients/rancher" - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/defaults" - nodestat "github.com/rancher/shepherd/extensions/nodes" - "github.com/sirupsen/logrus" - kwait "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - active = "active" -) - -type NodeRoles struct { - ControlPlane bool `json:"controlplane,omitempty" yaml:"controlplane,omitempty"` - Etcd bool `json:"etcd,omitempty" yaml:"etcd,omitempty"` - Worker bool `json:"worker,omitempty" yaml:"worker,omitempty"` - Quantity int64 `json:"quantity" yaml:"quantity"` - DrainBeforeDelete bool `json:"drainBeforeDelete,omitempty" yaml:"drainBeforeDelete,omitempty"` -} - -// NodePoolSetup is a helper method that will loop and setup multiple node pools with the defined node roles from the `nodeRoles` parameter -// `nodeRoles` would be in this format -// -// []map[string]bool{ -// { -// ControlPlane: true, -// Etcd: false, -// Worker: false, -// Quantity: 1, -// }, -// { -// ControlPlane: false, -// Etcd: true, -// Worker: false, -// Quantity: 1, -// }, -// } -func NodePoolSetup(client *rancher.Client, nodeRoles []NodeRoles, ClusterID, NodeTemplateID string) (*management.NodePool, error) { - nodePoolConfig := management.NodePool{ - ClusterID: ClusterID, - DeleteNotReadyAfterSecs: 0, - NodeTemplateID: NodeTemplateID, - } - - for index, roles := range nodeRoles { - nodePoolConfig.ControlPlane = roles.ControlPlane - nodePoolConfig.Etcd = roles.Etcd - nodePoolConfig.Worker = roles.Worker - nodePoolConfig.Quantity = roles.Quantity - nodePoolConfig.HostnamePrefix = "auto-rke1-" + strconv.Itoa(index) + ClusterID - nodePoolConfig.DrainBeforeDelete = roles.DrainBeforeDelete - - _, err := client.Management.NodePool.Create(&nodePoolConfig) - - if err != nil { - return nil, err - } - } - - return &nodePoolConfig, nil -} - -// MatchRKE1NodeRoles is a helper method that will return the desired node in the cluster, based on the node role. 
-func MatchRKE1NodeRoles(client *rancher.Client, cluster *management.Cluster, nodeRoles NodeRoles) (*management.Node, error) { - nodes, err := client.Management.Node.ListAll(&types.ListOpts{ - Filters: map[string]interface{}{ - "clusterId": cluster.ID, - }, - }) - if err != nil { - return nil, err - } - - for _, node := range nodes.Data { - if nodeRoles.ControlPlane != node.ControlPlane { - continue - } - if nodeRoles.Etcd != node.Etcd { - continue - } - if nodeRoles.Worker != node.Worker { - continue - } - - return &node, nil - } - - return nil, nil -} - -// updateNodePoolQuantity is a helper method that will update the node pool with the desired quantity. -func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, node *management.Node, nodeRoles NodeRoles) (*management.NodePool, error) { - updatedNodePool, err := client.Management.NodePool.ByID(node.NodePoolID) - if err != nil { - return nil, err - } - - updatedNodePool.Quantity += nodeRoles.Quantity - - logrus.Infof("Scaling the machine pool to %v total nodes", updatedNodePool.Quantity) - _, err = client.Management.NodePool.Update(updatedNodePool, &updatedNodePool) - if err != nil { - return nil, err - } - - err = kwait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, defaults.ThirtyMinuteTimeout, true, func(ctx context.Context) (done bool, err error) { - clusterResp, err := client.Management.Cluster.ByID(cluster.ID) - if err != nil { - return false, err - } - - if clusterResp.State == active && nodestat.AllManagementNodeReady(client, clusterResp.ID, defaults.ThirtyMinuteTimeout) == nil { - return true, nil - } - return false, nil - }) - if err != nil { - return nil, err - } - - return updatedNodePool, nil -} - -// ScaleNodePoolNodes is a helper method that will scale the node pool to the desired quantity. 
-func ScaleNodePoolNodes(client *rancher.Client, cluster *management.Cluster, node *management.Node, nodeRoles NodeRoles) (*management.NodePool, error) { - updatedNodePool, err := updateNodePoolQuantity(client, cluster, node, nodeRoles) - if err != nil { - return nil, err - } - - logrus.Infof("Node pool has been scaled!") - - return updatedNodePool, nil -} diff --git a/extensions/rke1/nodetemplates/aws/create.go b/extensions/rke1/nodetemplates/aws/create.go deleted file mode 100644 index 5aa184fb..00000000 --- a/extensions/rke1/nodetemplates/aws/create.go +++ /dev/null @@ -1,47 +0,0 @@ -package nodetemplates - -import ( - "github.com/rancher/shepherd/clients/rancher" - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/cloudcredentials/aws" - "github.com/rancher/shepherd/extensions/rke1/nodetemplates" - "github.com/rancher/shepherd/pkg/config" -) - -const awsEC2NodeTemplateNameBase = "awsNodeConfig" - -// CreateAWSNodeTemplate is a helper function that takes the rancher Client as a parameter and creates -// an AWS node template and returns the NodeTemplate response -func CreateAWSNodeTemplate(rancherClient *rancher.Client) (*nodetemplates.NodeTemplate, error) { - var amazonEC2NodeTemplateConfig nodetemplates.AmazonEC2NodeTemplateConfig - config.LoadConfig(nodetemplates.AmazonEC2NodeTemplateConfigurationFileKey, &amazonEC2NodeTemplateConfig) - - cloudCredential, err := aws.CreateAWSCloudCredentials(rancherClient) - if err != nil { - return nil, err - } - - nodeTemplate := nodetemplates.NodeTemplate{ - EngineInstallURL: "https://releases.rancher.com/install-docker/24.0.sh", - Name: awsEC2NodeTemplateNameBase, - AmazonEC2NodeTemplateConfig: &amazonEC2NodeTemplateConfig, - } - - nodeTemplateConfig := &nodetemplates.NodeTemplate{ - CloudCredentialID: cloudCredential.ID, - } - - config.LoadConfig(nodetemplates.NodeTemplateConfigurationFileKey, nodeTemplateConfig) - - nodeTemplateFinal, err := nodeTemplate.MergeOverride(nodeTemplateConfig, nodetemplates.AmazonEC2NodeTemplateConfigurationFileKey) - if err != nil { - return nil, err - } - - resp := &nodetemplates.NodeTemplate{} - err = rancherClient.Management.APIBaseClient.Ops.DoCreate(management.NodeTemplateType, *nodeTemplateFinal, resp) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/extensions/rke1/nodetemplates/aws_config.go b/extensions/rke1/nodetemplates/aws_config.go deleted file mode 100644 index 66c69072..00000000 --- a/extensions/rke1/nodetemplates/aws_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package nodetemplates - -// The json/yaml config key for the Amazon node template config -const AmazonEC2NodeTemplateConfigurationFileKey = "amazonec2Config" - -// AmazonNodeTemplateConfig is configuration need to create a Amazon node template -type AmazonEC2NodeTemplateConfig struct { - AMI string `json:"ami" yaml:"ami"` - BlockDurationMinutes string `json:"blockDurationMinutes" yaml:"blockDurationMinutes"` - DeviceName string `json:"deviceName" yaml:"deviceName"` - EncryptEBSVolume bool `json:"encryptEbsVolume" yaml:"encryptEbsVolume"` - Endpoint string `json:"endpoint" yaml:"endpoint"` - HTTPEndpoint string `json:"httpEndpoint" yaml:"httpEndpoint"` - HTTPTokens string `json:"httpTokens" yaml:"httpTokens"` - IAMInstanceProfile string `json:"iamInstanceProfile" yaml:"iamInstanceProfile"` - InsecureTransport bool `json:"insecureTransport" yaml:"insecureTransport"` - InstanceType string `json:"instanceType" yaml:"instanceType"` - KeyPairName string 
`json:"keyPairName" yaml:"keyPairName"` - KMSKey string `json:"kmsKey" yaml:"kmsKey"` - Monitoring bool `json:"monitoring" yaml:"monitoring"` - PrivateAddressOnly bool `json:"privateAddressOnly" yaml:"privateAddressOnly"` - Region string `json:"region" yaml:"region"` - RequestSpotInstance bool `json:"requestSpotInstance" yaml:"requestSpotInstance"` - Retries string `json:"retries" yaml:"retries"` - RootSize string `json:"rootSize" yaml:"rootSize"` - SecurityGroup []string `json:"securityGroup" yaml:"securityGroup"` - SecurityGroupReadonly bool `json:"securityGroupReadonly" yaml:"securityGroupReadonly"` - SessionToken string `json:"sessionToken" yaml:"sessionToken"` - SpotPrice string `json:"spotPrice" yaml:"spotPrice"` - SSHKeyContexts string `json:"sshKeyContexts" yaml:"sshKeyContexts"` - SSHUser string `json:"sshUser" yaml:"sshUser"` - SubnetID string `json:"subnetId" yaml:"subnetId"` - Tags string `json:"tags" yaml:"tags"` - Type string `json:"type" yaml:"type"` - UsePrivateAddress bool `json:"usePrivateAddress" yaml:"usePrivateAddress"` - UseEbsOptimizedInstance bool `json:"useEbsOptimizedInstance" yaml:"useEbsOptimizedInstance"` - VolumeType string `json:"volumeType" yaml:"volumeType"` - VPCId string `json:"vpcId" yaml:"vpcId"` - Zone string `json:"zone" yaml:"zone"` -} diff --git a/extensions/rke1/nodetemplates/azure/create.go b/extensions/rke1/nodetemplates/azure/create.go deleted file mode 100644 index db717574..00000000 --- a/extensions/rke1/nodetemplates/azure/create.go +++ /dev/null @@ -1,47 +0,0 @@ -package nodetemplates - -import ( - "github.com/rancher/shepherd/clients/rancher" - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/cloudcredentials/azure" - "github.com/rancher/shepherd/extensions/rke1/nodetemplates" - "github.com/rancher/shepherd/pkg/config" -) - -const azureNodeTemplateNameBase = "azureNodeConfig" - -// CreateAzureNodeTemplate is a helper function that takes the rancher Client as a parameter and creates -// an Azure node template and returns the NodeTemplate response -func CreateAzureNodeTemplate(rancherClient *rancher.Client) (*nodetemplates.NodeTemplate, error) { - var azureNodeTemplateConfig nodetemplates.AzureNodeTemplateConfig - config.LoadConfig(nodetemplates.AzureNodeTemplateConfigurationFileKey, &azureNodeTemplateConfig) - - cloudCredential, err := azure.CreateAzureCloudCredentials(rancherClient) - if err != nil { - return nil, err - } - - nodeTemplate := nodetemplates.NodeTemplate{ - EngineInstallURL: "https://releases.rancher.com/install-docker/23.0.sh", - Name: azureNodeTemplateNameBase, - AzureNodeTemplateConfig: &azureNodeTemplateConfig, - } - - nodeTemplateConfig := &nodetemplates.NodeTemplate{ - CloudCredentialID: cloudCredential.ID, - } - - config.LoadConfig(nodetemplates.NodeTemplateConfigurationFileKey, nodeTemplateConfig) - - nodeTemplateFinal, err := nodeTemplate.MergeOverride(nodeTemplateConfig, nodetemplates.AzureNodeTemplateConfigurationFileKey) - if err != nil { - return nil, err - } - - resp := &nodetemplates.NodeTemplate{} - err = rancherClient.Management.APIBaseClient.Ops.DoCreate(management.NodeTemplateType, *nodeTemplateFinal, resp) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/extensions/rke1/nodetemplates/azure_config.go b/extensions/rke1/nodetemplates/azure_config.go deleted file mode 100644 index 9a0c0da8..00000000 --- a/extensions/rke1/nodetemplates/azure_config.go +++ /dev/null @@ -1,34 +0,0 @@ -package nodetemplates - -// The 
json/yaml config key for the Azure node template config -const AzureNodeTemplateConfigurationFileKey = "azureConfig" - -// AzureNodeTemplateConfig is configuration need to create a Azure node template -type AzureNodeTemplateConfig struct { - AvailabilitySet string `json:"availabilitySet" yaml:"availabilitySet"` - CustomData string `json:"customData" yaml:"customData"` - DiskSize string `json:"diskSize" yaml:"diskSize"` - DNS string `json:"dns" yaml:"dns"` - DockerPort string `json:"dockerPort" yaml:"dockerPort"` - Environment string `json:"environment" yaml:"environment"` - FaultDomainCount string `json:"faultDomainCount" yaml:"faultDomainCount"` - Image string `json:"image" yaml:"image"` - Location string `json:"location" yaml:"location"` - ManagedDisks bool `json:"managedDisks" yaml:"managedDisks"` - NoPublicIP bool `json:"noPublicIp" yaml:"noPublicIp"` - OpenPort []string `json:"openPort" yaml:"openPort"` - NSG string `json:"nsg" yaml:"nsg"` - Plan string `json:"plan" yaml:"plan"` - PrivateIPAddress string `json:"privateIpAddress" yaml:"privateIpAddress"` - ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"` - Size string `json:"size" yaml:"size"` - SSHUser string `json:"sshUser" yaml:"sshUser"` - StaticPublicIP bool `json:"staticPublicIp" yaml:"staticPublicIp"` - StorageType string `json:"storageType" yaml:"storageType"` - Subnet string `json:"subnet" yaml:"subnet"` - SubnetPrefix string `json:"subnetPrefix" yaml:"subnetPrefix"` - Type string `json:"type" yaml:"type"` - UpdateDomainCount string `json:"updateDomainCount" yaml:"updateDomainCount"` - UsePrivateIP bool `json:"usePrivateIp" yaml:"usePrivateIp"` - VNET string `json:"vnet" yaml:"vnet"` -} diff --git a/extensions/rke1/nodetemplates/harvester/create.go b/extensions/rke1/nodetemplates/harvester/create.go deleted file mode 100644 index ea77ecc2..00000000 --- a/extensions/rke1/nodetemplates/harvester/create.go +++ /dev/null @@ -1,47 +0,0 @@ -package nodetemplates - -import ( - "github.com/rancher/shepherd/clients/rancher" - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/cloudcredentials/harvester" - "github.com/rancher/shepherd/extensions/rke1/nodetemplates" - "github.com/rancher/shepherd/pkg/config" -) - -const harvesterNodeTemplateNameBase = "harvesterNodeConfig" - -// CreateHarvesterNodeTemplate is a helper function that takes the rancher Client as a parameter and creates -// an Harvester node template and returns the NodeTemplate response -func CreateHarvesterNodeTemplate(rancherClient *rancher.Client) (*nodetemplates.NodeTemplate, error) { - var harvesterNodeTemplateConfig nodetemplates.HarvesterNodeTemplateConfig - config.LoadConfig(nodetemplates.HarvesterNodeTemplateConfigurationFileKey, &harvesterNodeTemplateConfig) - - cloudCredential, err := harvester.CreateHarvesterCloudCredentials(rancherClient) - if err != nil { - return nil, err - } - - nodeTemplate := nodetemplates.NodeTemplate{ - EngineInstallURL: "https://releases.rancher.com/install-docker/24.0.sh", - Name: harvesterNodeTemplateNameBase, - HarvesterNodeTemplateConfig: &harvesterNodeTemplateConfig, - } - - nodeTemplateConfig := &nodetemplates.NodeTemplate{ - CloudCredentialID: cloudCredential.ID, - } - - config.LoadConfig(nodetemplates.NodeTemplateConfigurationFileKey, nodeTemplateConfig) - - nodeTemplateFinal, err := nodeTemplate.MergeOverride(nodeTemplateConfig, nodetemplates.HarvesterNodeTemplateConfigurationFileKey) - if err != nil { - return nil, err - } - - resp := 
&nodetemplates.NodeTemplate{} - err = rancherClient.Management.APIBaseClient.Ops.DoCreate(management.NodeTemplateType, *nodeTemplateFinal, resp) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/extensions/rke1/nodetemplates/harvester_config.go b/extensions/rke1/nodetemplates/harvester_config.go deleted file mode 100644 index ad34ab18..00000000 --- a/extensions/rke1/nodetemplates/harvester_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package nodetemplates - -// The json/yaml config key for the Harvester node template config -const HarvesterNodeTemplateConfigurationFileKey = "harvesterConfig" - -// HarvesterNodeTemplateConfig is configuration need to create a Harvester node template -type HarvesterNodeTemplateConfig struct { - CloudConfig string `json:"cloudConfig" yaml:"cloudConfig"` - CPUCount string `json:"cpuCount" yaml:"cpuCount"` - DiskBus string `json:"diskBus" yaml:"diskBus"` - DiskSize string `json:"diskSize" yaml:"diskSize"` - ImageName string `json:"imageName" yaml:"imageName"` - KeyPairName string `json:"keyPairName" yaml:"keyPairName"` - MemorySize string `json:"memorySize" yaml:"memorySize"` - NetworkData string `json:"networkData" yaml:"networkData"` - NetworkModel string `json:"networkModel" yaml:"networkModel"` - NetworkName string `json:"networkName" yaml:"networkName"` - NetworkType string `json:"networkType" yaml:"networkType"` - SSHPassword string `json:"sshPassword" yaml:"sshPassword"` - SSHPort string `json:"sshPort" yaml:"sshPort"` - SSHPrivateKeyPath string `json:"sshPrivateKeyPath" yaml:"sshPrivateKeyPath"` - SSHUser string `json:"sshUser" yaml:"sshUser"` - Type string `json:"type" yaml:"type"` - UserData string `json:"userData" yaml:"userData"` - VMAffinity string `json:"vmAffinity" yaml:"vmAffinity"` - VMNamespace string `json:"vmNamespace" yaml:"vmNamespace"` -} diff --git a/extensions/rke1/nodetemplates/linode/create.go b/extensions/rke1/nodetemplates/linode/create.go deleted file mode 100644 index c256c7fe..00000000 --- a/extensions/rke1/nodetemplates/linode/create.go +++ /dev/null @@ -1,47 +0,0 @@ -package nodetemplates - -import ( - "github.com/rancher/shepherd/clients/rancher" - management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" - "github.com/rancher/shepherd/extensions/cloudcredentials/linode" - "github.com/rancher/shepherd/extensions/rke1/nodetemplates" - "github.com/rancher/shepherd/pkg/config" -) - -const linodeNodeTemplateNameBase = "linodeNodeConfig" - -// CreateLinodeNodeTemplate is a helper function that takes the rancher Client as a parameter and creates -// an Linode node template and returns the NodeTemplate response -func CreateLinodeNodeTemplate(rancherClient *rancher.Client) (*nodetemplates.NodeTemplate, error) { - var linodeNodeTemplateConfig nodetemplates.LinodeNodeTemplateConfig - config.LoadConfig(nodetemplates.LinodeNodeTemplateConfigurationFileKey, &linodeNodeTemplateConfig) - - cloudCredential, err := linode.CreateLinodeCloudCredentials(rancherClient) - if err != nil { - return nil, err - } - - nodeTemplate := nodetemplates.NodeTemplate{ - EngineInstallURL: "https://releases.rancher.com/install-docker/24.0.sh", - Name: linodeNodeTemplateNameBase, - LinodeNodeTemplateConfig: &linodeNodeTemplateConfig, - } - - nodeTemplateConfig := &nodetemplates.NodeTemplate{ - CloudCredentialID: cloudCredential.ID, - } - - config.LoadConfig(nodetemplates.NodeTemplateConfigurationFileKey, nodeTemplateConfig) - - nodeTemplateFinal, err := nodeTemplate.MergeOverride(nodeTemplateConfig, 
diff --git a/extensions/rke1/nodetemplates/linode_config.go b/extensions/rke1/nodetemplates/linode_config.go
deleted file mode 100644
index 4323ec39..00000000
--- a/extensions/rke1/nodetemplates/linode_config.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package nodetemplates
-
-// The json/yaml config key for the Linode node template config
-const LinodeNodeTemplateConfigurationFileKey = "linodeConfig"
-
-// LinodeNodeTemplateConfig is configuration need to create a Linode node template
-type LinodeNodeTemplateConfig struct {
-    AuthorizedUsers string `json:"authorizedUsers" yaml:"authorizedUsers"`
-    CreatePrivateIP bool `json:"createPrivateIP" yaml:"createPrivateIP"`
-    DockerPort string `json:"dockerPort" yaml:"dockerPort"`
-    Image string `json:"image" yaml:"image"`
-    InstanceType string `json:"instanceType" yaml:"instanceType"`
-    Label string `json:"label" yaml:"label"`
-    Region string `json:"region" yaml:"region"`
-    RootPass string `json:"rootPass" yaml:"rootPass"`
-    SSHPort string `json:"sshPort" yaml:"sshPort"`
-    SSHUser string `json:"sshUser" yaml:"sshUser"`
-    Stackscript string `json:"stackscript" yaml:"stackscript"`
-    StackscriptData string `json:"stackscriptData" yaml:"stackscriptData"`
-    SwapSize string `json:"swapSize" yaml:"swapSize"`
-    Tags string `json:"tags" yaml:"tags"`
-    Type string `json:"type" yaml:"type"`
-    UAPrefix string `json:"uaPrefix" yaml:"uaPrefix"`
-}
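The struct tags above double as the config-file schema: config.LoadConfig unmarshals the "linodeConfig" block of the cattle-config file into LinodeNodeTemplateConfig before the create helper runs. A minimal sketch of such a block expressed as the equivalent struct literal follows; every value is a placeholder, not a default shipped with this repo.

package nodetemplates_test // illustration only

import "github.com/rancher/shepherd/extensions/rke1/nodetemplates"

// linodeConfigExample mirrors a "linodeConfig" section of the cattle-config file;
// all values here are placeholders chosen for the sketch.
var linodeConfigExample = nodetemplates.LinodeNodeTemplateConfig{
    Image:        "linode/ubuntu22.04",
    InstanceType: "g6-standard-2",
    Region:       "us-east",
    RootPass:     "<generated-root-password>",
    SSHPort:      "22",
    SSHUser:      "root",
    Type:         "linodeConfig",
}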
diff --git a/extensions/rke1/nodetemplates/nodetemplates.go b/extensions/rke1/nodetemplates/nodetemplates.go
deleted file mode 100644
index 815c9582..00000000
--- a/extensions/rke1/nodetemplates/nodetemplates.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package nodetemplates
-
-import (
-    "maps"
-
-    "github.com/pkg/errors"
-    "github.com/rancher/norman/types"
-    "gopkg.in/yaml.v2"
-    "k8s.io/utils/strings/slices"
-)
-
-// The json/yaml config key for the node template config
-const NodeTemplateConfigurationFileKey = "nodeTemplate"
-
-// NodeTemplate is the main struct needed to create a node template for an RKE1 cluster
-type NodeTemplate struct {
-    types.Resource
-    Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
-    AuthCertificateAuthority string `json:"authCertificateAuthority,omitempty" yaml:"authCertificateAuthority,omitempty"`
-    AuthKey string `json:"authKey,omitempty" yaml:"authKey,omitempty"`
-    CloudCredentialID string `json:"cloudCredentialId,omitempty" yaml:"cloudCredentialId,omitempty"`
-    Created string `json:"created,omitempty" yaml:"created,omitempty"`
-    CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"`
-    Description string `json:"description,omitempty" yaml:"description,omitempty"`
-    DockerVersion string `json:"dockerVersion,omitempty" yaml:"dockerVersion,omitempty"`
-    Driver string `json:"driver,omitempty" yaml:"driver,omitempty"`
-    EngineEnv map[string]string `json:"engineEnv,omitempty" yaml:"engineEnv,omitempty"`
-    EngineInsecureRegistry []string `json:"engineInsecureRegistry,omitempty" yaml:"engineInsecureRegistry,omitempty"`
-    EngineInstallURL string `json:"engineInstallURL,omitempty" yaml:"engineInstallURL,omitempty"`
-    EngineLabel map[string]string `json:"engineLabel,omitempty" yaml:"engineLabel,omitempty"`
-    EngineOpt map[string]string `json:"engineOpt,omitempty" yaml:"engineOpt,omitempty"`
-    EngineRegistryMirror []string `json:"engineRegistryMirror,omitempty" yaml:"engineRegistryMirror,omitempty"`
-    EngineStorageDriver string `json:"engineStorageDriver,omitempty" yaml:"engineStorageDriver,omitempty"`
-    Label map[string]string `json:"label,omitempty" yaml:"label,omitempty"`
-    AmazonEC2NodeTemplateConfig *AmazonEC2NodeTemplateConfig `json:"amazonec2Config" yaml:"amazonec2Config,omitempty"`
-    AzureNodeTemplateConfig *AzureNodeTemplateConfig `json:"azureConfig" yaml:"azureConfig,omitempty"`
-    HarvesterNodeTemplateConfig *HarvesterNodeTemplateConfig `json:"harvesterConfig" yaml:"harvesterConfig,omitempty"`
-    LinodeNodeTemplateConfig *LinodeNodeTemplateConfig `json:"linodeConfig" yaml:"linodeConfig,omitempty"`
-    VmwareVsphereNodeTemplateConfig *VmwareVsphereNodeTemplateConfig `json:"vmwarevsphereConfig" yaml:"vmwarevsphereConfig,omitempty"`
-    Name string `json:"name,omitempty" yaml:"name,omitempty"`
-    NamespaceID string `json:"namespaceId,omitempty" yaml:"namespaceId,omitempty"`
-    Removed string `json:"removed,omitempty" yaml:"removed,omitempty"`
-    State string `json:"state,omitempty" yaml:"state,omitempty"`
-    Transitioning string `json:"transitioning,omitempty" yaml:"transitioning,omitempty"`
-    TransitioningMessage string `json:"transitioningMessage,omitempty" yaml:"transitioningMessage,omitempty"`
-    Type string `json:"type,omitempty" yaml:"type,omitempty"`
-    UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
-    UseInternalIPAddress *bool `json:"useInternalIpAddress,omitempty" yaml:"useInternalIpAddress,omitempty"`
-}
-
-func providerTemplateConfigKeys() []string {
-    return []string{
-        AmazonEC2NodeTemplateConfigurationFileKey,
-        AzureNodeTemplateConfigurationFileKey,
-        HarvesterNodeTemplateConfigurationFileKey,
-        LinodeNodeTemplateConfigurationFileKey,
-        VmwareVsphereNodeTemplateConfigurationFileKey,
-    }
-}
-
-// MergeOverride merges two NodeTemplate objects by overriding fields from n1 with fields from n2
-// - preserves fields not present in n2
-// - deletes all provider keys except the specified providerTemplateConfigKey from both NodeTemplate objects before merging
-//
-// providerTemplateConfigKey: The key representing the provider template configuration to preserve during merging
-//
-// returns a pointer to the merged NodeTemplate and an error if any
-func (n1 *NodeTemplate) MergeOverride(n2 *NodeTemplate, providerTemplateConfigKey string) (*NodeTemplate, error) {
-    var n1Data map[string]any
-    n1YAML, err := yaml.Marshal(&n1)
-    if err != nil {
-        return nil, errors.Wrap(err, "MergeOverride: ")
-    }
-    err = yaml.Unmarshal(n1YAML, &n1Data)
-    if err != nil {
-        return nil, errors.Wrap(err, "MergeOverride: ")
-    }
-
-    var n2Data map[string]any
-    n2YAML, err := yaml.Marshal(&n2)
-    if err != nil {
-        return nil, errors.Wrap(err, "MergeOverride: ")
-    }
-    err = yaml.Unmarshal(n2YAML, &n2Data)
-    if err != nil {
-        return nil, errors.Wrap(err, "MergeOverride: ")
-    }
-
-    configKeys := providerTemplateConfigKeys()
-    var keyPosition int
-    for pos, configKey := range configKeys {
-        if configKey == providerTemplateConfigKey {
-            keyPosition = pos
-        }
-    }
-
-    // Delete all other provider keys from both nodetemplates
-    keysToDelete := append(configKeys[:keyPosition], configKeys[keyPosition+1:]...)
-    maps.DeleteFunc(n1Data, func(k string, v any) bool {
-        return slices.Contains(keysToDelete, k) && k != providerTemplateConfigKey
-    })
-    maps.DeleteFunc(n2Data, func(k string, v any) bool {
-        return slices.Contains(keysToDelete, k) && k != providerTemplateConfigKey
-    })
-
-    maps.Copy(n1Data, n2Data)
-
-    tempData, err := yaml.Marshal(n1Data)
-    if err != nil {
-        return nil, errors.Wrap(err, "MergeOverride: ")
-    }
-
-    var mergedNodeTemplate = NodeTemplate{}
-    err = yaml.Unmarshal(tempData, &mergedNodeTemplate)
-    if err != nil {
-        return nil, errors.Wrap(err, "MergeOverride: ")
-    }
-    return &mergedNodeTemplate, nil
-}
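MergeOverride is the piece of removed behaviour most likely to trip up anyone re-implementing these helpers: the override's populated fields win, the base's remaining fields are preserved, and every provider config block except the one named by providerTemplateConfigKey is stripped before the merge. A hedged sketch of that contract, with placeholder values:

package nodetemplates_test // illustration only

import "github.com/rancher/shepherd/extensions/rke1/nodetemplates"

// mergeExample shows the removed MergeOverride contract: the override's populated
// field (CloudCredentialID) wins, the base's other fields are preserved, and only
// the "linodeConfig" provider block survives the merge.
func mergeExample() (*nodetemplates.NodeTemplate, error) {
    base := nodetemplates.NodeTemplate{
        Name:             "linodeNodeConfig",
        EngineInstallURL: "https://releases.rancher.com/install-docker/24.0.sh",
        LinodeNodeTemplateConfig: &nodetemplates.LinodeNodeTemplateConfig{
            Region: "us-east", // placeholder
        },
    }
    override := &nodetemplates.NodeTemplate{
        CloudCredentialID: "cattle-global-data:cc-example", // placeholder ID
    }

    return base.MergeOverride(override, nodetemplates.LinodeNodeTemplateConfigurationFileKey)
}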
diff --git a/extensions/rke1/nodetemplates/vsphere/create.go b/extensions/rke1/nodetemplates/vsphere/create.go
deleted file mode 100644
index d3a0b5db..00000000
--- a/extensions/rke1/nodetemplates/vsphere/create.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package nodetemplates
-
-import (
-    "github.com/rancher/shepherd/clients/rancher"
-    management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
-    "github.com/rancher/shepherd/extensions/cloudcredentials/vsphere"
-    "github.com/rancher/shepherd/extensions/rke1/nodetemplates"
-    "github.com/rancher/shepherd/pkg/config"
-)
-
-const vmwarevsphereNodeTemplateNameBase = "vmwarevsphereNodeConfig"
-
-// CreateVSphereNodeTemplate is a helper function that takes the rancher Client as a parameter and creates
-// a VSphere node template and returns the NodeTemplate response
-func CreateVSphereNodeTemplate(rancherClient *rancher.Client) (*nodetemplates.NodeTemplate, error) {
-    var vmwarevsphereNodeTemplateConfig nodetemplates.VmwareVsphereNodeTemplateConfig
-    config.LoadConfig(nodetemplates.VmwareVsphereNodeTemplateConfigurationFileKey, &vmwarevsphereNodeTemplateConfig)
-
-    cloudCredential, err := vsphere.CreateVsphereCloudCredentials(rancherClient)
-    if err != nil {
-        return nil, err
-    }
-
-    nodeTemplate := nodetemplates.NodeTemplate{
-        EngineInstallURL: "https://releases.rancher.com/install-docker/20.10.sh",
-        Name: vmwarevsphereNodeTemplateNameBase,
-        VmwareVsphereNodeTemplateConfig: &vmwarevsphereNodeTemplateConfig,
-    }
-
-    nodeTemplateConfig := &nodetemplates.NodeTemplate{
-        CloudCredentialID: cloudCredential.ID,
-    }
-
-    config.LoadConfig(nodetemplates.NodeTemplateConfigurationFileKey, nodeTemplateConfig)
-
-    nodeTemplateFinal, err := nodeTemplate.
-        MergeOverride(nodeTemplateConfig, nodetemplates.VmwareVsphereNodeTemplateConfigurationFileKey)
-    if err != nil {
-        return nil, err
-    }
-
-    resp := &nodetemplates.NodeTemplate{}
-    err = rancherClient.Management.APIBaseClient.Ops.DoCreate(management.NodeTemplateType, *nodeTemplateFinal, resp)
-
-    if err != nil {
-        return nil, err
-    }
-
-    return resp, nil
-}
-
-// GetVsphereNodeTemplate is a helper to get the vsphere node template from a config
-func GetVsphereNodeTemplate() *nodetemplates.VmwareVsphereNodeTemplateConfig {
-    var vmwarevsphereNodeTemplateConfig nodetemplates.VmwareVsphereNodeTemplateConfig
-    config.LoadConfig(nodetemplates.VmwareVsphereNodeTemplateConfigurationFileKey, &vmwarevsphereNodeTemplateConfig)
-
-    return &vmwarevsphereNodeTemplateConfig
-}
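Unlike the create helper, GetVsphereNodeTemplate only reads the "vmwarevsphereConfig" block and never calls the Rancher API, so callers used it to inspect or validate the configured template before provisioning. A hedged usage sketch (the printed fields are illustrative, the caller function is hypothetical):

package nodetemplates_test // illustration only

import (
    "fmt"

    vsphereTemplates "github.com/rancher/shepherd/extensions/rke1/nodetemplates/vsphere"
)

// printVsphereTemplate loads the "vmwarevsphereConfig" block from the cattle-config
// file via the removed helper and prints a couple of its fields; no API calls are made.
func printVsphereTemplate() {
    vsphereConfig := vsphereTemplates.GetVsphereNodeTemplate()
    fmt.Printf("cloning %q into datacenter %q\n", vsphereConfig.CloneFrom, vsphereConfig.Datacenter)
}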
diff --git a/extensions/rke1/nodetemplates/vsphere_config.go b/extensions/rke1/nodetemplates/vsphere_config.go
deleted file mode 100644
index 33944f81..00000000
--- a/extensions/rke1/nodetemplates/vsphere_config.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package nodetemplates
-
-// The json/yaml config key for the VSphere node template config
-const VmwareVsphereNodeTemplateConfigurationFileKey = "vmwarevsphereConfig"
-
-// VmwareVsphereNodeTemplateConfig is configuration need to create a VSphere node template
-type VmwareVsphereNodeTemplateConfig struct {
-    Cfgparam []string `json:"cfgparam" yaml:"cfgparam"`
-    CloneFrom string `json:"cloneFrom" yaml:"cloneFrom"`
-    CloudConfig string `json:"cloudConfig" yaml:"cloudConfig"`
-    Cloundinit string `json:"cloundinit" yaml:"cloundinit"`
-    ContentLibrary string `json:"contentLibrary" yaml:"contentLibrary"`
-    CPUCount string `json:"cpuCount" yaml:"cpuCount"`
-    CreationType string `json:"creationType" yaml:"creationType"`
-    CustomAttribute []string `json:"customAttribute" yaml:"customAttribute"`
-    Datacenter string `json:"datacenter" yaml:"datacenter"`
-    Datastore string `json:"datastore" yaml:"datastore"`
-    DatastoreURL string `json:"datastoreURL" yaml:"datastoreURL"`
-    DatastoreCluster string `json:"datastoreCluster" yaml:"datastoreCluster"`
-    DiskSize string `json:"diskSize" yaml:"diskSize"`
-    Folder string `json:"folder" yaml:"folder"`
-    HostSystem string `json:"hostSystem" yaml:"hostSystem"`
-    MemorySize string `json:"memorySize" yaml:"memorySize"`
-    Network []string `json:"network" yaml:"network"`
-    OS string `json:"os" yaml:"os"`
-    Password string `json:"password" yaml:"password"`
-    Pool string `json:"pool" yaml:"pool"`
-    SSHPassword string `json:"sshPassword" yaml:"sshPassword"`
-    SSHPort string `json:"sshPort" yaml:"sshPort"`
-    SSHUser string `json:"sshUser" yaml:"sshUser"`
-    SSHUserGroup string `json:"sshUserGroup" yaml:"sshUserGroup"`
-    Tag []string `json:"tag" yaml:"tag"`
-    Username string `json:"username" yaml:"username"`
-    VappIpallocationplicy string `json:"vappIpallocationplicy" yaml:"vappIpallocationplicy"`
-    VappIpprotocol string `json:"vappIpprotocol" yaml:"vappIpprotocol"`
-    VappProperty []string `json:"vappProperty" yaml:"vappProperty"`
-    VappTransport string `json:"vappTransport" yaml:"vappTransport"`
-    Vcenter string `json:"vcenter" yaml:"vcenter"`
-    VcenterPort string `json:"vcenterPort" yaml:"vcenterPort"`
-}