diff --git a/.golangci.yaml b/.golangci.yaml index bb013d5e..dae34707 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -17,7 +17,7 @@ linters-settings: disabled: true - name: exported disabled: false - goconst: + goconst: min-len: 2 min-occurrences: 10 match-constant: true @@ -39,3 +39,7 @@ issues: - ^*\.yaml$ - ^*\.yml$ exclude-generated-strict: true + exclude-rules: + - path: extensions/kubeconfig/podlogs.go + linters: + - forbidigo diff --git a/clients/dynamic/dynamic.go b/clients/dynamic/dynamic.go index b46ec3ec..f2a6f1a5 100644 --- a/clients/dynamic/dynamic.go +++ b/clients/dynamic/dynamic.go @@ -23,7 +23,7 @@ type Client struct { } // NewForConfig creates a new dynamic client or returns an error. -func NewForConfig(ts *session.Session, inConfig *rest.Config) (dynamic.Interface, error) { +func NewForConfig(ts *session.Session, inConfig *rest.Config) (*Client, error) { logrus.Debugf("Dynamic Client Host:%s", inConfig.Host) dynamicClient, err := dynamic.NewForConfig(inConfig) @@ -44,7 +44,7 @@ func NewForConfig(ts *session.Session, inConfig *rest.Config) (dynamic.Interface // Version: "v3", // Resource: "users", // } -func (d *Client) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { +func (d *Client) Resource(resource schema.GroupVersionResource) *NamespaceableResourceClient { return &NamespaceableResourceClient{ NamespaceableResourceInterface: d.Interface.Resource(resource), ts: d.ts, @@ -59,7 +59,7 @@ type NamespaceableResourceClient struct { } // Namespace returns a dynamic.ResourceInterface that is embedded in ResourceClient, so ultimately its Create is overwritten. -func (d *NamespaceableResourceClient) Namespace(s string) dynamic.ResourceInterface { +func (d *NamespaceableResourceClient) Namespace(s string) *ResourceClient { return &ResourceClient{ ResourceInterface: d.NamespaceableResourceInterface.Namespace(s), ts: d.ts, diff --git a/clients/helm/helm.go b/clients/helm/helm.go index 43a1add2..70efd6de 100644 --- a/clients/helm/helm.go +++ b/clients/helm/helm.go @@ -7,7 +7,19 @@ import ( "github.com/rancher/shepherd/pkg/session" ) -var helmCmd = "helm_v3" +var helmCmd = "" + +func SetHelmCmd(command string) error { + helmCmd = "helm_v3" + if command != "" { + msg, err := exec.Command(command).CombinedOutput() + if err != nil { + return errors.Wrapf(err, "SetHelmCmd: errored while running `%s` %s", command, string(msg)) + } + helmCmd = command + } + return nil +} // InstallChart installs a helm chart using helm CLI. // Send the helm set command strings such as "--set", "installCRDs=true" @@ -34,12 +46,8 @@ func InstallChart(ts *session.Session, releaseName, helmRepo, namespace, version commandArgs = append(commandArgs, "--version", version) } - msg, err := exec.Command(helmCmd, commandArgs...).CombinedOutput() - if err != nil { - return errors.Wrap(err, "InstallChart: "+string(msg)) - } - - return nil + _, err := execCommand("InstallChart: ", commandArgs) + return err } // UpgradeChart upgrades a helm chart using helm CLI. 
@@ -67,12 +75,24 @@ func UpgradeChart(ts *session.Session, releaseName, helmRepo, namespace, version commandArgs = append(commandArgs, "--version", version) } - msg, err := exec.Command(helmCmd, commandArgs...).CombinedOutput() - if err != nil { - return errors.Wrap(err, "UpgradeChart: "+string(msg)) + _, err := execCommand("UpgradeChart: ", commandArgs) + return err + +} + +func GetValues(releaseName, namespace string, args ...string) (string, error) { + // Default helm upgrade command + commandArgs := []string{ + "get", + "values", + releaseName, + "--namespace", + namespace, } - return nil + commandArgs = append(commandArgs, args...) + + return execCommand("GetValues: ", commandArgs) } // UninstallChart uninstalls a helm chart using helm CLI in a given namespace @@ -87,20 +107,30 @@ func UninstallChart(releaseName, namespace string, args ...string) error { "--wait", } - msg, err := exec.Command(helmCmd, commandArgs...).CombinedOutput() - if err != nil { - return errors.Wrap(err, "UninstallChart: "+string(msg)) - } + commandArgs = append(commandArgs, args...) - return nil + _, err := execCommand("UninstallChart: ", commandArgs) + return err } // AddHelmRepo adds the specified helm repository using the helm repo add command. func AddHelmRepo(name, url string) error { - msg, err := exec.Command(helmCmd, "repo", "add", name, url).CombinedOutput() + commandArgs := []string{ + "repo", + "add", + name, + url, + } + + _, err := execCommand("AddHelmRepo: ", commandArgs) + return err +} + +func execCommand(errMsg string, commandArgs []string) (string, error) { + msg, err := exec.Command(helmCmd, commandArgs...).CombinedOutput() if err != nil { - return errors.Wrap(err, "AddHelmRepo: "+string(msg)) + return "", errors.Wrap(err, errMsg+string(msg)) } - return nil + return string(msg), nil } diff --git a/clients/rancher/client.go b/clients/rancher/client.go index a4d120f9..5a55ef5b 100644 --- a/clients/rancher/client.go +++ b/clients/rancher/client.go @@ -16,6 +16,7 @@ import ( management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" v1 "github.com/rancher/shepherd/clients/rancher/v1" + rancherDynamic "github.com/rancher/shepherd/clients/dynamic" kubeProvisioning "github.com/rancher/shepherd/clients/provisioning" "github.com/rancher/shepherd/clients/ranchercli" kubeRKE "github.com/rancher/shepherd/clients/rke" @@ -27,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -222,7 +222,7 @@ func (c *Client) GetClusterCatalogClient(clusterID string) (*catalog.Client, err } // GetRancherDynamicClient is a helper function that instantiates a dynamic client to communicate with the rancher host. -func (c *Client) GetRancherDynamicClient() (dynamic.Interface, error) { +func (c *Client) GetRancherDynamicClient() (*rancherDynamic.Client, error) { dynamic, err := frameworkDynamic.NewForConfig(c.Session, c.restConfig) if err != nil { return nil, err @@ -251,7 +251,7 @@ func (c *Client) GetKubeAPIRKEClient() (*kubeRKE.Client, error) { } // GetDownStreamClusterClient is a helper function that instantiates a dynamic client to communicate with a specific cluster. 
-func (c *Client) GetDownStreamClusterClient(clusterID string) (dynamic.Interface, error) {
+func (c *Client) GetDownStreamClusterClient(clusterID string) (*rancherDynamic.Client, error) {
 	restConfig := *c.restConfig
 	restConfig.Host = fmt.Sprintf("https://%s/k8s/clusters/%s", c.restConfig.Host, clusterID)
 
@@ -263,7 +263,7 @@ func (c *Client) GetDownStreamClusterClient(clusterID string) (dynamic.Interface
 }
 
 // SwitchContext is a helper function that changes the current context to `context` and instantiates a dynamic client
-func (c *Client) SwitchContext(context string, clientConfig *clientcmd.ClientConfig) (dynamic.Interface, error) {
+func (c *Client) SwitchContext(context string, clientConfig *clientcmd.ClientConfig) (*rancherDynamic.Client, error) {
 	overrides := clientcmd.ConfigOverrides{CurrentContext: context}
 
 	rawConfig, err := (*clientConfig).RawConfig()
diff --git a/extensions/clusters/clusters.go b/extensions/clusters/clusters.go
index f6a0cd81..f2fb095d 100644
--- a/extensions/clusters/clusters.go
+++ b/extensions/clusters/clusters.go
@@ -1278,6 +1278,43 @@ func WatchAndWaitForCluster(client *rancher.Client, steveID string) error {
 	return err
 }
 
+// WatchAndWaitForClusterWithTimeout waits for a cluster to leave the active state, then watches it, up to the provided timeoutSeconds, until it becomes active again.
+func WatchAndWaitForClusterWithTimeout(client *rancher.Client, steveID string, timeoutSeconds *int64) error {
+	var clusterResp *v1.SteveAPIObject
+	err := kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) {
+		clusterResp, err = client.Steve.SteveType(ProvisioningSteveResourceType).ByID(steveID)
+		if err != nil {
+			return false, err
+		}
+		state := clusterResp.ObjectMeta.State.Name
+		return state != "active", nil
+	})
+	if err != nil {
+		return err
+	}
+	logrus.Infof("waiting for cluster to be up")
+
+	adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session)
+	if err != nil {
+		return err
+	}
+	kubeProvisioningClient, err := adminClient.GetKubeAPIProvisioningClient()
+	if err != nil {
+		return err
+	}
+
+	result, err := kubeProvisioningClient.Clusters(clusterResp.ObjectMeta.Namespace).Watch(context.TODO(), metav1.ListOptions{
+		FieldSelector:  "metadata.name=" + clusterResp.Name,
+		TimeoutSeconds: timeoutSeconds,
+	})
+	if err != nil {
+		return err
+	}
+
+	err = wait.WatchWait(result, IsProvisioningClusterReady)
+	return err
+}
+
 // GetProvisioningClusterByName is a helper function to get cluster object with the cluster name
 func GetProvisioningClusterByName(client *rancher.Client, clusterName string, namespace string) (*apisV1.Cluster, *v1.SteveAPIObject, error) {
 	clusterObj, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(namespace + "/" + clusterName)
@@ -1317,6 +1354,28 @@ func WaitForActiveRKE1Cluster(client *rancher.Client, clusterID string) error {
 	return nil
 }
 
+// WaitForActiveRKE1ClusterWithTimeout waits, up to timeoutMinutes, for the RKE1 cluster with the given ID to reach the active state.
+func WaitForActiveRKE1ClusterWithTimeout(client *rancher.Client, clusterID string, timeoutMinutes int) error {
+	err := kwait.Poll(500*time.Millisecond, time.Duration(timeoutMinutes)*time.Minute, func() (done bool, err error) {
+		client, err = client.ReLogin()
+		if err != nil {
+			return false, err
+		}
+		clusterResp, err := client.Management.Cluster.ByID(clusterID)
+		if err != nil {
+			return false, err
+		}
+		if clusterResp.State == active {
+			return true, nil
+		}
+		return false, nil
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 // ListDownstreamClusters is a helper function to get the name of the downstream clusters
 func ListDownstreamClusters(client *rancher.Client) (clusterNames []string, err
error) { clusterList, err := client.Steve.SteveType(ProvisioningSteveResourceType).ListAll(nil) diff --git a/extensions/codecoverage/codecoverage.go b/extensions/codecoverage/codecoverage.go index 084d6019..4e49f47d 100644 --- a/extensions/codecoverage/codecoverage.go +++ b/extensions/codecoverage/codecoverage.go @@ -7,6 +7,7 @@ import ( "time" apiv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + rancherDynamic "github.com/rancher/shepherd/clients/dynamic" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" @@ -17,7 +18,6 @@ import ( k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kwait "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" ) var podGroupVersionResource = corev1.SchemeGroupVersion.WithResource("pods") @@ -30,7 +30,7 @@ const ( outputDir = "cover" ) -func checkServiceIsRunning(dynamicClient dynamic.Interface) error { +func checkServiceIsRunning(dynamicClient rancherDynamic.Client) error { return kwait.Poll(500*time.Millisecond, 2*time.Minute, func() (done bool, err error) { _, err = dynamicClient.Resource(podGroupVersionResource).Namespace(cattleSystemNameSpace).List(context.Background(), metav1.ListOptions{}) if k8sErrors.IsInternalError(err) || k8sErrors.IsServiceUnavailable(err) { @@ -120,7 +120,7 @@ func KillRancherTestServicesRetrieveCoverage(client *rancher.Client) error { return err } - err = checkServiceIsRunning(dynamicClient) + err = checkServiceIsRunning(*dynamicClient) if err != nil { return err } @@ -169,7 +169,7 @@ func KillAgentTestServicesRetrieveCoverage(client *rancher.Client) error { return err } - err = checkServiceIsRunning(dynamicClient) + err = checkServiceIsRunning(*dynamicClient) if err != nil { return err } diff --git a/extensions/kubeapi/cluster/operations.go b/extensions/kubeapi/cluster/operations.go new file mode 100644 index 00000000..4016f5e2 --- /dev/null +++ b/extensions/kubeapi/cluster/operations.go @@ -0,0 +1,34 @@ +package cluster + +import ( + "context" + + "github.com/rancher/shepherd/clients/rancher" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ManagementGroupName = "management.cattle.io" + +func ManagementClusterGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: ManagementGroupName, Version: "v3", Resource: "clusters"} +} + +// ListAll is a helper function that uses the dynamic client to return a list of all clusters.management.cattle.io. 
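+// A nil opts is treated as an empty metav1.ListOptions, so every cluster is returned.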
+func ListAll(client *rancher.Client, opts *metav1.ListOptions) (list *unstructured.UnstructuredList, err error) { + if opts == nil { + opts = &metav1.ListOptions{} + } + + dynamic, err := client.GetRancherDynamicClient() + if err != nil { + return + } + + unstructuredClusterList, err := dynamic.Resource(ManagementClusterGVR()).List(context.TODO(), *opts) + if err != nil { + return + } + return unstructuredClusterList, err +} diff --git a/extensions/kubeapi/cluster/summary.go b/extensions/kubeapi/cluster/summary.go index 50b86669..e762223f 100644 --- a/extensions/kubeapi/cluster/summary.go +++ b/extensions/kubeapi/cluster/summary.go @@ -6,7 +6,6 @@ import ( "github.com/rancher/shepherd/clients/rancher" "github.com/rancher/wrangler/pkg/summary" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) // IsClusterActive is a helper function that uses the dynamic client to return cluster's ready state. @@ -16,7 +15,7 @@ func IsClusterActive(client *rancher.Client, clusterID string) (ready bool, err return } - unstructuredCluster, err := dynamic.Resource(schema.GroupVersionResource{Group: "management.cattle.io", Version: "v3", Resource: "clusters"}).Get(context.TODO(), clusterID, metav1.GetOptions{}) + unstructuredCluster, err := dynamic.Resource(ManagementClusterGVR()).Get(context.TODO(), clusterID, metav1.GetOptions{}) if err != nil { return } diff --git a/extensions/kubeapi/configmaps/configmaps.go b/extensions/kubeapi/configmaps/configmaps.go index 51509efe..fb7682fd 100644 --- a/extensions/kubeapi/configmaps/configmaps.go +++ b/extensions/kubeapi/configmaps/configmaps.go @@ -9,6 +9,16 @@ import ( coreV1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" +) + +type PatchOP string + +const ( + AddPatchOP PatchOP = "add" + ReplacePatchOP PatchOP = "replace" + RemovePatchOP PatchOP = "remove" ) // ConfigMapGroupVersionResource is the required Group Version Resource for accessing config maps in a cluster, @@ -19,9 +29,13 @@ var ConfigMapGroupVersionResource = schema.GroupVersionResource{ Resource: "configmaps", } +type ConfigMapList struct { + Items []coreV1.ConfigMap +} + // CreateConfigMap is a helper function that uses the dynamic client to create a config map on a namespace for a specific cluster. // It registers a delete fuction. 
-func CreateConfigMap(client *rancher.Client, clusterName, configMapName, description, namespace string, data, labels, annotations map[string]string) (*coreV1.ConfigMap, error) { +func CreateConfigMap(client *rancher.Client, clusterID, configMapName, description, namespace string, data, labels, annotations map[string]string) (*coreV1.ConfigMap, error) { // ConfigMap object for a namespace in a cluster annotations["field.cattle.io/description"] = description configMap := &coreV1.ConfigMap{ @@ -34,7 +48,7 @@ func CreateConfigMap(client *rancher.Client, clusterName, configMapName, descrip Data: data, } - dynamicClient, err := client.GetDownStreamClusterClient(clusterName) + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) if err != nil { return nil, err } @@ -76,3 +90,98 @@ func NewConfigmapTemplate(configmapName, namespace string, annotations, labels, Data: data, } } + +func ListConfigMaps(client *rancher.Client, clusterID, namespace string, opts metav1.ListOptions) (*ConfigMapList, error) { + configMapList := new(ConfigMapList) + + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + configMaps, err := configMapResource.List(context.TODO(), opts) + if err != nil { + return nil, err + } + + for _, unstructuredConfigMap := range configMaps.Items { + newConfigMap := &coreV1.ConfigMap{} + + err := scheme.Scheme.Convert(&unstructuredConfigMap, newConfigMap, unstructuredConfigMap.GroupVersionKind()) + if err != nil { + return nil, err + } + + configMapList.Items = append(configMapList.Items, *newConfigMap) + } + + return configMapList, nil +} + +func GetConfigMapByName(client *rancher.Client, clusterID, configMapName, namespace string, getOpts metav1.GetOptions) (*coreV1.ConfigMap, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + unstructuredResp, err := configMapResource.Get(context.TODO(), configMapName, getOpts) + if err != nil { + return nil, err + } + + newConfigMap := &coreV1.ConfigMap{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMap, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMap, nil +} + +func PatchConfigMap(client *rancher.Client, clusterID, configMapName, namespace string, data string, patchType types.PatchType) (*coreV1.ConfigMap, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + + unstructuredResp, err := configMapResource.Patch(context.TODO(), configMapName, patchType, []byte(data), metav1.PatchOptions{}) + if err != nil { + return nil, err + } + + newConfigMap := &coreV1.ConfigMap{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMap, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMap, nil +} + +// PatchConfigMapFromYAML is a helper function that uses the dynamic client to patch a configMap in a namespace for a specific cluster. +// Different merge strategies are supported based on the PatchType. 
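+// For example (illustrative values only), a strategic merge patch that adds or updates a single data key could look like:
+//
+//	patch := []byte("data:\n  foo: bar\n")
+//	configMap, err := PatchConfigMapFromYAML(client, clusterID, "my-config", "default", patch, types.StrategicMergePatchType)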
+func PatchConfigMapFromYAML(client *rancher.Client, clusterID, configMapName, namespace string, rawYAML []byte, patchType types.PatchType) (*coreV1.ConfigMap, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + configMapResource := dynamicClient.Resource(ConfigMapGroupVersionResource).Namespace(namespace) + + rawJSON, err := yaml.ToJSON(rawYAML) + if err != nil { + return nil, err + } + + unstructuredResp, err := configMapResource.Patch(context.TODO(), configMapName, patchType, rawJSON, metav1.PatchOptions{}) + if err != nil { + return nil, err + } + + newConfigMap := &coreV1.ConfigMap{} + err = scheme.Scheme.Convert(unstructuredResp, newConfigMap, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newConfigMap, nil +} diff --git a/extensions/kubeapi/helm/helm.go b/extensions/kubeapi/helm/helm.go index 4e2820bf..e6564c72 100644 --- a/extensions/kubeapi/helm/helm.go +++ b/extensions/kubeapi/helm/helm.go @@ -45,7 +45,7 @@ func InstallRancher(ts *session.Session, restConfig *rest.Config) error { } // Install Rancher Chart - err = helm.InstallChart(ts, "rancher", + return helm.InstallChart(ts, "rancher", "rancher-stable/rancher", "cattle-system", "", @@ -57,11 +57,6 @@ func InstallRancher(ts *session.Session, restConfig *rest.Config) error { "useBundledSystemChart=true", "--set", "replicas=1") - if err != nil { - return err - } - - return nil } // InstallCertManager installs latest version cert manager available through helm @@ -87,10 +82,5 @@ func InstallCertManager(ts *session.Session, restConfig *rest.Config) error { } // Install cert-manager Chart - err = helm.InstallChart(ts, "cert-manager", "jetstack/cert-manager", "cert-manager", "", "--set", "installCRDs=true") - if err != nil { - return err - } - - return nil + return helm.InstallChart(ts, "cert-manager", "jetstack/cert-manager", "cert-manager", "", "--set", "installCRDs=true") } diff --git a/extensions/kubeapi/rbac/list.go b/extensions/kubeapi/rbac/list.go index 181ab50a..41b6e210 100644 --- a/extensions/kubeapi/rbac/list.go +++ b/extensions/kubeapi/rbac/list.go @@ -159,3 +159,28 @@ func ListRoleTemplates(client *rancher.Client, listOpt metav1.ListOptions) (*v3. return rtList, nil } + +// ListProjectRoleTemplateBindings is a helper function that uses the dynamic client to list projectroletemplatebindings from local cluster. 
+func ListProjectRoleTemplateBindings(client *rancher.Client, listOpt metav1.ListOptions) (*v3.ProjectRoleTemplateBindingList, error) { + dynamicClient, err := client.GetDownStreamClusterClient(LocalCluster) + if err != nil { + return nil, err + } + + unstructuredList, err := dynamicClient.Resource(ProjectRoleTemplateBindingGroupVersionResource).Namespace("").List(context.TODO(), listOpt) + if err != nil { + return nil, err + } + + prtbList := new(v3.ProjectRoleTemplateBindingList) + for _, unstructuredPRTB := range unstructuredList.Items { + prtb := &v3.ProjectRoleTemplateBinding{} + err := scheme.Scheme.Convert(&unstructuredPRTB, prtb, unstructuredPRTB.GroupVersionKind()) + if err != nil { + return nil, err + } + + prtbList.Items = append(prtbList.Items, *prtb) + } + return prtbList, nil +} diff --git a/extensions/kubeapi/workloads/deployments/get.go b/extensions/kubeapi/workloads/deployments/get.go new file mode 100644 index 00000000..5f152109 --- /dev/null +++ b/extensions/kubeapi/workloads/deployments/get.go @@ -0,0 +1,33 @@ +package deployments + +import ( + "context" + + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/pkg/api/scheme" + appv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetDeployment is a helper function that uses the dynamic client to get a deployment in a cluster with its get options. +func GetDeployment(client *rancher.Client, clusterID, name, namespace string, getOpts metav1.GetOptions) (*appv1.Deployment, error) { + + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + + deploymentResource := dynamicClient.Resource(DeploymentGroupVersionResource).Namespace(namespace) + unstructuredResp, err := deploymentResource.Get(context.TODO(), name, getOpts) + if err != nil { + return nil, err + } + + newDeployment := &appv1.Deployment{} + err = scheme.Scheme.Convert(unstructuredResp, newDeployment, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + + return newDeployment, nil +} diff --git a/extensions/kubeapi/workloads/deployments/patch.go b/extensions/kubeapi/workloads/deployments/patch.go new file mode 100644 index 00000000..a7916db4 --- /dev/null +++ b/extensions/kubeapi/workloads/deployments/patch.go @@ -0,0 +1,66 @@ +package deployments + +import ( + "context" + "fmt" + "time" + + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/pkg/api/scheme" + appv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func PatchDeployment(client *rancher.Client, clusterID, deploymentName, namespace string, data string, patchType types.PatchType) (*appv1.Deployment, error) { + dynamicClient, err := client.GetDownStreamClusterClient(clusterID) + if err != nil { + return nil, err + } + deploymentResource := dynamicClient.Resource(DeploymentGroupVersionResource).Namespace(namespace) + + unstructuredResp, err := deploymentResource.Patch(context.TODO(), deploymentName, patchType, []byte(data), metav1.PatchOptions{}) + if err != nil { + return nil, err + } + + newDeployment := &appv1.Deployment{} + err = scheme.Scheme.Convert(unstructuredResp, newDeployment, unstructuredResp.GroupVersionKind()) + if err != nil { + return nil, err + } + return newDeployment, nil +} + +// PatchDeploymentFromYAML is a helper function that uses the dynamic client to patch a deployment in a namespace for a specific cluster. 
+// Different merge strategies are supported based on the PatchType.
+func PatchDeploymentFromYAML(client *rancher.Client, clusterID, deploymentName, namespace string, rawYAML []byte, patchType types.PatchType) (*appv1.Deployment, error) {
+	dynamicClient, err := client.GetDownStreamClusterClient(clusterID)
+	if err != nil {
+		return nil, err
+	}
+	deploymentResource := dynamicClient.Resource(DeploymentGroupVersionResource).Namespace(namespace)
+
+	rawJSON, err := yaml.ToJSON(rawYAML)
+	if err != nil {
+		return nil, err
+	}
+
+	unstructuredResp, err := deploymentResource.Patch(context.TODO(), deploymentName, patchType, rawJSON, metav1.PatchOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	newDeployment := &appv1.Deployment{}
+	err = scheme.Scheme.Convert(unstructuredResp, newDeployment, unstructuredResp.GroupVersionKind())
+	if err != nil {
+		return nil, err
+	}
+	return newDeployment, nil
+}
+
+func RestartDeployment(client *rancher.Client, clusterID, deploymentName, namespace string) (*appv1.Deployment, error) {
+	data := fmt.Sprintf(`{"spec": {"template": {"metadata": {"annotations": {"kubectl.kubernetes.io/restartedAt": "%s"}}}}}`, time.Now().Format("20060102150405"))
+	return PatchDeploymentFromYAML(client, clusterID, deploymentName, namespace, []byte(data), types.StrategicMergePatchType)
+}
diff --git a/extensions/kubeconfig/exec.go b/extensions/kubeconfig/exec.go
index 6a6838ff..fc0763ce 100644
--- a/extensions/kubeconfig/exec.go
+++ b/extensions/kubeconfig/exec.go
@@ -2,6 +2,7 @@ package kubeconfig
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"os"
 	"strings"
@@ -73,7 +74,7 @@ func KubectlExec(restConfig *restclient.Config, podName, namespace string, comma
 	}
 
 	logStreamer := &LogStreamer{}
-	err = exec.Stream(remotecommand.StreamOptions{
+	err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{
 		Stdin:  nil,
 		Stdout: logStreamer,
 		Stderr: os.Stderr,
diff --git a/extensions/kubeconfig/kubeconfig.go b/extensions/kubeconfig/kubeconfig.go
index c5f2ee94..d2211175 100644
--- a/extensions/kubeconfig/kubeconfig.go
+++ b/extensions/kubeconfig/kubeconfig.go
@@ -2,9 +2,13 @@ package kubeconfig
 
 import (
-	"errors"
+	"os"
 
+	"github.com/pkg/errors"
 	"github.com/rancher/shepherd/clients/rancher"
+	"github.com/sirupsen/logrus"
+	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/clientcmd/api"
 )
 
 // GetKubeconfig generates a kubeconfig froma specific cluster, and returns it in the form of a *clientcmd.ClientConfig
@@ -33,3 +38,104 @@ func GetKubeconfig(client *rancher.Client, clusterID string) (*clientcmd.ClientC
 
 	return &cfg, nil
 }
+
+// GetKubeconfigFromFlags loads a kubeconfig from the given file path, optionally overriding the server URL with masterURL.
+func GetKubeconfigFromFlags(masterURL, kubeconfigPath string) (*clientcmd.ClientConfig, error) {
+	if _, err := os.Stat(kubeconfigPath); err != nil {
+		return nil, errors.Wrap(err, "GetKubeconfigFromFlags: ")
+	}
+	kubeConfigContent, err := os.ReadFile(kubeconfigPath) // read the contents of the file
+	if err != nil {
+		return nil, err
+	}
+
+	clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigContent)
+	if err != nil {
+		return nil, err
+	}
+	if masterURL != "" {
+		rawConfig, err := clientConfig.RawConfig()
+		if err != nil {
+			return nil, err
+		}
+
+		clientConfig = clientcmd.NewDefaultClientConfig(rawConfig, &clientcmd.ConfigOverrides{
+			ClusterInfo: api.Cluster{
+				Server: masterURL,
+			},
+		})
+	}
+	return &clientConfig, err
+}
+
+// GenerateKubeconfigForRestConfig builds a kubeconfig, in YAML form, from the given rest.Config using the provided user, context, and cluster names.
+func GenerateKubeconfigForRestConfig(restConfig *rest.Config, defaultUser, defaultContext, clusterName string) ([]byte, error) {
+	if defaultUser == "" || defaultContext == "" || clusterName == "" {
+		return nil, errors.New("GenerateKubeconfigForRestConfig: 'defaultUser', 'defaultContext', and 'clusterName' must all be non-zero strings")
+	}
+	clusters := make(map[string]*api.Cluster)
+	clusters[clusterName] = &api.Cluster{
+		Server:                   restConfig.Host,
+		CertificateAuthorityData: restConfig.CAData,
+	}
+	contexts := make(map[string]*api.Context)
+	contexts[defaultContext] = &api.Context{
+		Cluster:  clusterName,
+		AuthInfo: defaultUser,
+	}
+	authinfos := make(map[string]*api.AuthInfo)
+	authinfos[defaultUser] = &api.AuthInfo{
+		ClientCertificateData: restConfig.CertData,
+		ClientKeyData:         restConfig.KeyData,
+	}
+	clientConfig := api.Config{
+		Kind:           "Config",
+		APIVersion:     "v1",
+		Clusters:       clusters,
+		Contexts:       contexts,
+		CurrentContext: defaultContext,
+		AuthInfos:      authinfos,
+	}
+	return clientcmd.Write(clientConfig)
+}
+
+// GetKubeConfigBytes returns the generated kubeconfig for the given cluster as raw bytes.
+func GetKubeConfigBytes(client *rancher.Client, clusterID string) ([]byte, error) {
+	cluster, err := client.Management.Cluster.ByID(clusterID)
+	if err != nil {
+		return nil, err
+	}
+
+	kubeConfig, err := client.Management.Cluster.ActionGenerateKubeconfig(cluster)
+	if err != nil {
+		return nil, err
+	}
+
+	return []byte(kubeConfig.Config), err
+}
+
+// WriteKubeconfigToFile writes the generated kubeconfig for the given cluster to the file at path, overwriting any existing file.
+func WriteKubeconfigToFile(client *rancher.Client, clusterID, path string) error {
+	cluster, err := client.Management.Cluster.ByID(clusterID)
+	if err != nil {
+		return err
+	}
+
+	kubeConfig, err := client.Management.Cluster.ActionGenerateKubeconfig(cluster)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if _, err := f.Write([]byte(kubeConfig.Config)); err != nil {
+		return err
+	}
+
+	logrus.Infof("Finished writing kubeconfig to %s", path)
+	return nil
+}
diff --git a/extensions/kubeconfig/podlogs.go b/extensions/kubeconfig/podlogs.go
index 414787cb..5100914c 100644
--- a/extensions/kubeconfig/podlogs.go
+++ b/extensions/kubeconfig/podlogs.go
@@ -4,6 +4,8 @@ import (
 	"bufio"
 	"context"
 	"fmt"
+	"io"
+	"os"
 	"strconv"
 	"strings"
 
@@ -14,7 +16,9 @@ import (
 	restclient "k8s.io/client-go/rest"
 )
 
-// GetPodLogs fetches logs from a Kubernetes pod. Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string causes no buffering.
+// GetPodLogs fetches logs from a Kubernetes pod +// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes +// returns a string of all logs read and an error if any func GetPodLogs(client *rancher.Client, clusterID string, podName string, namespace string, bufferSizeStr string) (string, error) { var restConfig *restclient.Config @@ -73,10 +77,71 @@ func GetPodLogs(client *rancher.Client, clusterID string, podName string, namesp return logs, nil } +// GetPodLogsWithOpts fetches logs from a Kubernetes pod and allows +// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes +// returns a string of all logs read and an error if any +func GetPodLogsWithOpts(client *rancher.Client, clusterID string, podName string, namespace string, bufferSizeStr string, opts *corev1.PodLogOptions) (string, error) { + var restConfig *restclient.Config + + kubeConfig, err := GetKubeconfig(client, clusterID) + if err != nil { + return "", err + } + + restConfig, err = (*kubeConfig).ClientConfig() + if err != nil { + return "", err + } + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) + restConfig.ContentConfig.GroupVersion = &podGroupVersion + restConfig.APIPath = apiPath + + restClient, err := restclient.RESTClientFor(restConfig) + if err != nil { + return "", err + } + + req := restClient.Get().Resource("pods").Name(podName).Namespace(namespace).SubResource("log") + req.VersionedParams( + opts, + k8Scheme.ParameterCodec, + ) + + stream, err := req.Stream(context.TODO()) + if err != nil { + return "", fmt.Errorf("error streaming pod logs for pod %s/%s: %v", namespace, podName, err) + } + + defer stream.Close() + + reader := bufio.NewScanner(stream) + + if bufferSizeStr != "" { + bufferSize, err := parseBufferSize(bufferSizeStr) + if err != nil { + return "", fmt.Errorf("error in parseBufferSize: %v", err) + } + + buf := make([]byte, bufferSize) + reader.Buffer(buf, bufferSize) + } + + var logs string + for reader.Scan() { + logs = logs + fmt.Sprintf("%s\n", reader.Text()) + fmt.Println(reader.Text()) + } + + if err := reader.Err(); err != nil { + return "", fmt.Errorf("error reading pod logs for pod %s/%s: %v", namespace, podName, err) + } + return logs, nil +} + // parseBufferSize is a helper function that parses a size string and returns // the equivalent size in bytes. The provided size string should end with a // suffix of 'KB', 'MB', or 'GB'. If no suffix is provided, the function will -// return an error. 
+// return an error. On success it returns the buffer size in bytes.
 func parseBufferSize(sizeStr string) (int, error) {
 	sizeStr = strings.ToUpper(sizeStr)
 	var mult int
@@ -101,3 +166,93 @@
 
 	return size * mult, nil
 }
+
+// GetPodLogsWithContext fetches logs from a Kubernetes pod
+// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes
+// returns a string of all logs read and an error if any
+func GetPodLogsWithContext(ctx context.Context, client *rancher.Client, clusterID, podName, namespace, bufferSizeStr, logFilePath string, opts *corev1.PodLogOptions) (string, error) {
+	var restConfig *restclient.Config
+
+	kubeConfig, err := GetKubeconfig(client, clusterID)
+	if err != nil {
+		return "", err
+	}
+
+	restConfig, err = (*kubeConfig).ClientConfig()
+	if err != nil {
+		return "", err
+	}
+	restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme)
+	restConfig.ContentConfig.GroupVersion = &podGroupVersion
+	restConfig.APIPath = apiPath
+
+	restClient, err := restclient.RESTClientFor(restConfig)
+	if err != nil {
+		return "", err
+	}
+
+	req := restClient.Get().Resource("pods").Name(podName).Namespace(namespace).SubResource("log")
+	req.VersionedParams(
+		opts,
+		k8Scheme.ParameterCodec,
+	)
+
+	stream, err := req.Stream(ctx)
+	if err != nil {
+		return "", fmt.Errorf("error streaming pod logs for pod %s/%s: %v", namespace, podName, err)
+	}
+	defer stream.Close()
+
+	logFile, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+	if err != nil {
+		return "", fmt.Errorf("error opening log file: %v", err)
+	}
+	defer logFile.Close()
+
+	return readAndWriteLogsWithContext(ctx, stream, logFile, bufferSizeStr, false)
+}
+
+// readAndWriteLogsWithContext is a helper function that reads the stream line by line, echoing each non-empty line to stdout and writing every kept line to the given logFile
+// - when excludeDebug is true, lines containing "debug" or "trace" are skipped entirely and written to neither stdout nor the file
+// - if the context is canceled before all logs are read, the function returns immediately with the logs read so far and the context's error
+//
+// Buffer size (e.g., '64KB', '8MB', '1GB') influences log reading; an empty string results in bufio.Scanner's default of 4096 bytes
+// returns a string of all logs read and an error if any
+func readAndWriteLogsWithContext(ctx context.Context, stream io.ReadCloser, logFile *os.File, bufferSizeStr string, excludeDebug bool) (string, error) {
+	logs := &strings.Builder{}
+	defer logFile.Close()
+
+	scanner := bufio.NewScanner(stream)
+	if bufferSizeStr != "" {
+		bufferSize, err := parseBufferSize(bufferSizeStr)
+		if err != nil {
+			return "", fmt.Errorf("error in parseBufferSize: %v", err)
+		}
+
+		buf := make([]byte, bufferSize)
+		scanner.Buffer(buf, bufferSize)
+	}
+
+	for scanner.Scan() {
+		select {
+		case <-ctx.Done():
+			return logs.String(), ctx.Err() // Return immediately if context is canceled
+		default:
+			logLine := scanner.Text()
+			lineHasDebug := strings.Contains(strings.ToLower(logLine), "debug") || strings.Contains(strings.ToLower(logLine), "trace")
+			if excludeDebug && lineHasDebug {
+				continue
+			}
+			if strings.TrimSpace(logLine) != "" {
+				fmt.Println(logLine) // Write log to stdout
+			}
+			fmt.Fprintln(logFile, logLine) // Write log to file
+			logs.WriteString(logLine + "\n")
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return logs.String(), fmt.Errorf("error reading 
logs: %v", err) + } + + return logs.String(), nil +} diff --git a/extensions/kubeconfig/pods.go b/extensions/kubeconfig/pods.go new file mode 100644 index 00000000..1eb03e9c --- /dev/null +++ b/extensions/kubeconfig/pods.go @@ -0,0 +1,57 @@ +package kubeconfig + +import ( + "context" + "errors" + + "github.com/rancher/shepherd/clients/rancher" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes" + k8Scheme "k8s.io/client-go/kubernetes/scheme" +) + +func GetPods(client *rancher.Client, clusterID string, namespace string, listOptions *metav1.ListOptions) ([]corev1.Pod, error) { + + kubeConfig, err := GetKubeconfig(client, clusterID) + if err != nil { + return nil, err + } + + restConfig, err := (*kubeConfig).ClientConfig() + if err != nil { + return nil, err + } + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(k8Scheme.Scheme) + restConfig.ContentConfig.GroupVersion = &podGroupVersion + restConfig.APIPath = apiPath + + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, err + } + + pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), *listOptions) + if err != nil { + return nil, err + } + return pods.Items, nil +} + +func GetPodNames(client *rancher.Client, clusterID string, namespace string, listOptions *metav1.ListOptions) ([]string, error) { + pods, err := GetPods(client, clusterID, namespace, listOptions) + if err != nil { + return nil, err + } + + var names []string + for _, pod := range pods { + names = append(names, pod.Name) + } + if len(names) == 0 { + return nil, errors.New("GetPodNames: no pod names found") + } + + return names, nil +} diff --git a/extensions/kubectl/create.go b/extensions/kubectl/create.go new file mode 100644 index 00000000..c5ac209b --- /dev/null +++ b/extensions/kubectl/create.go @@ -0,0 +1,54 @@ +package kubectl + +import ( + "context" + "fmt" + + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/unstructured" + "github.com/rancher/shepherd/pkg/session" + "github.com/sirupsen/logrus" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1Unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func CreateUnstructured(s *session.Session, client *rancher.Client, content []byte, clusterID, n string, gvr schema.GroupVersionResource) (*v1Unstructured.Unstructured, error) { + dynClient, _, err := setupDynamicClient(s, client, nil, clusterID) + if err != nil { + return nil, err + } + + obj, _, err := v1Unstructured.UnstructuredJSONScheme.Decode(content, nil, nil) + if err != nil { + logrus.Fatal(fmt.Sprintf("Error while decoding YAML object. Err was: %s", err)) + return nil, err + } + + result, err := dynClient.Resource(gvr).Namespace(n).Create(context.TODO(), unstructured.MustToUnstructured(obj), metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return result, nil +} + +func CreateUnstructuredFromFlags(s *session.Session, masterURL, kubeconfigPath string, content []byte, n string, gvr schema.GroupVersionResource) (*v1Unstructured.Unstructured, error) { + dynClient, _, err := setupDynamicClientFromFlags(s, masterURL, kubeconfigPath, nil) + if err != nil { + return nil, err + } + + obj, _, err := v1Unstructured.UnstructuredJSONScheme.Decode(content, nil, nil) + if err != nil { + logrus.Fatal(fmt.Sprintf("Error while decoding YAML object. 
Err was: %s", err)) + return nil, err + } + + result, err := dynClient.Resource(gvr).Namespace(n).Create(context.TODO(), unstructured.MustToUnstructured(obj), metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return result, nil +} diff --git a/extensions/kubectl/get.go b/extensions/kubectl/get.go new file mode 100644 index 00000000..477f8232 --- /dev/null +++ b/extensions/kubectl/get.go @@ -0,0 +1,39 @@ +package kubectl + +import ( + "context" + + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/pkg/session" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1Unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func GetUnstructured(s *session.Session, client *rancher.Client, name, clusterID, n string, gvr schema.GroupVersionResource) (*v1Unstructured.Unstructured, error) { + dynClient, _, err := setupDynamicClient(s, client, nil, clusterID) + if err != nil { + return nil, err + } + + result, err := dynClient.Resource(gvr).Namespace(n).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return result, nil +} + +func GetUnstructuredFromFlags(s *session.Session, masterURL, kubeconfigPath, name, clusterID, n string, gvr schema.GroupVersionResource) (*v1Unstructured.Unstructured, error) { + dynClient, _, err := setupDynamicClientFromFlags(s, masterURL, kubeconfigPath, nil) + if err != nil { + return nil, err + } + + result, err := dynClient.Resource(gvr).Namespace(n).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return result, nil +} diff --git a/extensions/kubectl/list.go b/extensions/kubectl/list.go new file mode 100644 index 00000000..ffff4061 --- /dev/null +++ b/extensions/kubectl/list.go @@ -0,0 +1,65 @@ +package kubectl + +import ( + "context" + + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/pkg/session" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1Unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func ListUnstructured(s *session.Session, client *rancher.Client, name, clusterID, n string, gvr schema.GroupVersionResource, opts metav1.ListOptions) (*v1Unstructured.UnstructuredList, error) { + dynClient, _, err := setupDynamicClient(s, client, nil, clusterID) + if err != nil { + return nil, err + } + + result, err := dynClient.Resource(gvr).Namespace(n).List(context.TODO(), opts) + if err != nil { + return nil, err + } + return result, nil +} + +func ListAllUnstructured(s *session.Session, client *rancher.Client, name, clusterID string, gvr schema.GroupVersionResource, opts metav1.ListOptions) (*v1Unstructured.UnstructuredList, error) { + dynClient, _, err := setupDynamicClient(s, client, nil, clusterID) + if err != nil { + return nil, err + } + + result, err := dynClient.Resource(gvr).List(context.TODO(), opts) + if err != nil { + return nil, err + } + return result, nil +} + +func ListUnstructuredFromFlags(s *session.Session, masterURL, kubeconfigPath, name, clusterID, n string, gvr schema.GroupVersionResource, opts metav1.ListOptions) (*v1Unstructured.UnstructuredList, error) { + dynClient, _, err := setupDynamicClientFromFlags(s, masterURL, kubeconfigPath, nil) + if err != nil { + return nil, err + } + + result, err := dynClient.Resource(gvr).Namespace(n).List(context.TODO(), opts) + if err != nil { + return nil, err + } + return result, nil +} + +func ListAllUnstructuredFromFlags(s 
*session.Session, masterURL, kubeconfigPath, name, clusterID string, gvr schema.GroupVersionResource, opts metav1.ListOptions) (*v1Unstructured.UnstructuredList, error) { + dynClient, _, err := setupDynamicClientFromFlags(s, masterURL, kubeconfigPath, nil) + if err != nil { + return nil, err + } + + result, err := dynClient.Resource(gvr).List(context.TODO(), opts) + if err != nil { + return nil, err + } + return result, nil +} diff --git a/extensions/kubectl/setup.go b/extensions/kubectl/setup.go new file mode 100644 index 00000000..fb89b1ea --- /dev/null +++ b/extensions/kubectl/setup.go @@ -0,0 +1,65 @@ +package kubectl + +import ( + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/extensions/kubeconfig" + "github.com/rancher/shepherd/pkg/session" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + + shepherdDynamic "github.com/rancher/shepherd/clients/dynamic" +) + +func setupDynamicClient(s *session.Session, client *rancher.Client, scheme *runtime.Scheme, clusterID string) (*shepherdDynamic.Client, *session.Session, error) { + kubeConfig, err := kubeconfig.GetKubeconfig(client, clusterID) + if err != nil { + return nil, s, err + } + + restConfig, err := (*kubeConfig).ClientConfig() + if err != nil { + return nil, s, err + } + + if scheme != nil { + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(scheme) + } + + var session *session.Session + if s == nil { + session = client.Session.NewSession() + } else { + session = s + } + + dynClient, err := shepherdDynamic.NewForConfig(session, restConfig) + + return dynClient, session, err +} + +func setupDynamicClientFromFlags(s *session.Session, masterURL, kubeconfigPath string, scheme *runtime.Scheme) (*shepherdDynamic.Client, *session.Session, error) { + kubeConfig, err := kubeconfig.GetKubeconfigFromFlags(masterURL, kubeconfigPath) + if err != nil { + return nil, s, err + } + + restConfig, err := (*kubeConfig).ClientConfig() + if err != nil { + return nil, s, err + } + + if scheme != nil { + restConfig.ContentConfig.NegotiatedSerializer = serializer.NewCodecFactory(scheme) + } + + var session *session.Session + if s == nil { + session = session.NewSession() + } else { + session = s + } + + dynClient, err := shepherdDynamic.NewForConfig(session, restConfig) + + return dynClient, session, err +} diff --git a/extensions/provisioning/verify.go b/extensions/provisioning/verify.go index d9e347c7..6b35f0e1 100644 --- a/extensions/provisioning/verify.go +++ b/extensions/provisioning/verify.go @@ -103,6 +103,65 @@ func VerifyRKE1Cluster(t *testing.T, client *rancher.Client, clustersConfig *clu } } +// VerifyRKE1ClusterWithTimeout validates that the RKE1 cluster and its resources are in a good state within the custom timeout, matching a given config. 
+func VerifyRKE1ClusterWithTimeout(t *testing.T, client *rancher.Client, clustersConfig *clusters.ClusterConfig, cluster *management.Cluster, timeout *int64) { + client, err := client.ReLogin() + require.NoError(t, err) + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + require.NoError(t, err) + + var timeoutSeconds *int64 + if timeout != nil { + timeoutSeconds = timeout + } else { + timeoutSeconds = &defaults.WatchTimeoutSeconds + } + + watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ + FieldSelector: "metadata.name=" + cluster.ID, + TimeoutSeconds: timeoutSeconds, + }) + require.NoError(t, err) + + checkFunc := clusters.IsHostedProvisioningClusterReady + err = wait.WatchWait(watchInterface, checkFunc) + require.NoError(t, err) + + assert.Equal(t, clustersConfig.KubernetesVersion, cluster.RancherKubernetesEngineConfig.Version) + + clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name) + require.NoError(t, err) + assert.NotEmpty(t, clusterToken) + + err = nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) + require.NoError(t, err) + + if clustersConfig.PSACT == string(provisioninginput.RancherPrivileged) || clustersConfig.PSACT == string(provisioninginput.RancherRestricted) || clustersConfig.PSACT == string(provisioninginput.RancherBaseline) { + require.NotEmpty(t, cluster.DefaultPodSecurityAdmissionConfigurationTemplateName) + + err := psadeploy.CreateNginxDeployment(client, cluster.ID, clustersConfig.PSACT) + require.NoError(t, err) + } + if clustersConfig.Registries != nil { + if clustersConfig.Registries.RKE1Registries != nil { + for _, registry := range clustersConfig.Registries.RKE1Registries { + havePrefix, err := registries.CheckAllClusterPodsForRegistryPrefix(client, cluster.ID, registry.URL) + require.NoError(t, err) + assert.True(t, havePrefix) + } + } + } + if clustersConfig.Networking != nil { + if clustersConfig.Networking.LocalClusterAuthEndpoint != nil { + VerifyACE(t, adminClient, cluster) + } + } + + podErrors := pods.StatusPods(client, cluster.ID) + assert.Empty(t, podErrors) +} + // VerifyCluster validates that a non-rke1 cluster and its resources are in a good state, matching a given config. func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *clusters.ClusterConfig, cluster *steveV1.SteveAPIObject) { client, err := client.ReLogin() @@ -176,6 +235,84 @@ func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *cluster } } +// VerifyClusterWithTimeout validates that a non-rke1 cluster and its resources are in a good state within the custom timeout, matching a given config. 
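+// A nil timeout falls back to defaults.WatchTimeoutSeconds for the underlying watch.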
+func VerifyClusterWithTimeout(t *testing.T, client *rancher.Client, clustersConfig *clusters.ClusterConfig, cluster *steveV1.SteveAPIObject, timeout *int64) {
+	client, err := client.ReLogin()
+	require.NoError(t, err)
+
+	adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session)
+	require.NoError(t, err)
+
+	kubeProvisioningClient, err := adminClient.GetKubeAPIProvisioningClient()
+	require.NoError(t, err)
+
+	var timeoutSeconds *int64
+	if timeout != nil {
+		timeoutSeconds = timeout
+	} else {
+		timeoutSeconds = &defaults.WatchTimeoutSeconds
+	}
+
+	watchInterface, err := kubeProvisioningClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{
+		FieldSelector:  "metadata.name=" + cluster.Name,
+		TimeoutSeconds: timeoutSeconds,
+	})
+	require.NoError(t, err)
+
+	checkFunc := clusters.IsProvisioningClusterReady
+	err = wait.WatchWait(watchInterface, checkFunc)
+	require.NoError(t, err)
+
+	clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name)
+	require.NoError(t, err)
+	assert.NotEmpty(t, clusterToken)
+
+	err = nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout)
+	require.NoError(t, err)
+
+	status := &provv1.ClusterStatus{}
+	err = steveV1.ConvertToK8sType(cluster.Status, status)
+	require.NoError(t, err)
+
+	clusterSpec := &provv1.ClusterSpec{}
+	err = steveV1.ConvertToK8sType(cluster.Spec, clusterSpec)
+	require.NoError(t, err)
+
+	configKubeVersion := clustersConfig.KubernetesVersion
+	require.Equal(t, configKubeVersion, clusterSpec.KubernetesVersion)
+
+	if clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName == string(provisioninginput.RancherPrivileged) ||
+		clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName == string(provisioninginput.RancherRestricted) ||
+		clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName == string(provisioninginput.RancherBaseline) {
+
+		require.NotEmpty(t, clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName)
+
+		err := psadeploy.CreateNginxDeployment(client, status.ClusterName, clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName)
+		require.NoError(t, err)
+	}
+
+	if clusterSpec.RKEConfig.Registries != nil {
+		for registryName := range clusterSpec.RKEConfig.Registries.Configs {
+			havePrefix, err := registries.CheckAllClusterPodsForRegistryPrefix(client, status.ClusterName, registryName)
+			require.NoError(t, err)
+			assert.True(t, havePrefix)
+		}
+	}
+
+	if clusterSpec.LocalClusterAuthEndpoint.Enabled {
+		mgmtClusterObject, err := adminClient.Management.Cluster.ByID(status.ClusterName)
+		require.NoError(t, err)
+		VerifyACE(t, adminClient, mgmtClusterObject)
+	}
+
+	podErrors := pods.StatusPods(client, status.ClusterName)
+	assert.Empty(t, podErrors)
+
+	if clustersConfig.ClusterSSHTests != nil {
+		VerifySSHTests(t, client, cluster, clustersConfig.ClusterSSHTests, status.ClusterName)
+	}
+}
+
 // VerifyHostedCluster validates that the hosted cluster and its resources are in a good state, matching a given config.
 func VerifyHostedCluster(t *testing.T, client *rancher.Client, cluster *management.Cluster) {
 	client, err := client.ReLogin()
@@ -206,6 +343,43 @@ func VerifyHostedCluster(t *testing.T, client *rancher.Client, cluster *manageme
 	assert.Empty(t, podErrors)
 }
 
+// VerifyHostedClusterWithTimeout validates that the hosted cluster and its resources are in a good state within the custom timeout, matching a given config.
+func VerifyHostedClusterWithTimeout(t *testing.T, client *rancher.Client, cluster *management.Cluster, timeout *int64) { + client, err := client.ReLogin() + require.NoError(t, err) + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + require.NoError(t, err) + + var timeoutSeconds *int64 + if timeout != nil { + timeoutSeconds = timeout + } else { + timeoutSeconds = &defaults.WatchTimeoutSeconds + } + + watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ + FieldSelector: "metadata.name=" + cluster.ID, + TimeoutSeconds: timeoutSeconds, + }) + require.NoError(t, err) + + checkFunc := clusters.IsHostedProvisioningClusterReady + + err = wait.WatchWait(watchInterface, checkFunc) + require.NoError(t, err) + + clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name) + require.NoError(t, err) + assert.NotEmpty(t, clusterToken) + + err = nodestat.AllManagementNodeReady(client, cluster.ID, defaults.ThirtyMinuteTimeout) + require.NoError(t, err) + + podErrors := pods.StatusPods(client, cluster.ID) + assert.Empty(t, podErrors) +} + // VerifyDeleteRKE1Cluster validates that a rke1 cluster and its resources are deleted. func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID string) { cluster, err := client.Management.Cluster.ByID(clusterID) @@ -235,6 +409,42 @@ func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID str require.NoError(t, err) } +// VerifyDeleteRKE1Cluster validates that a rke1 cluster and its resources are deleted. +func VerifyDeleteRKE1ClusterWithTimeout(t *testing.T, client *rancher.Client, clusterID string, timeout *int64) { + cluster, err := client.Management.Cluster.ByID(clusterID) + require.NoError(t, err) + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + require.NoError(t, err) + + var timeoutSeconds *int64 + if timeout != nil { + timeoutSeconds = timeout + } else { + timeoutSeconds = &defaults.WatchTimeoutSeconds + } + + watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ + FieldSelector: "metadata.name=" + clusterID, + TimeoutSeconds: timeoutSeconds, + }) + require.NoError(t, err) + + err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { + if event.Type == watch.Error { + return false, fmt.Errorf("error: unable to delete cluster %s", cluster.Name) + } else if event.Type == watch.Deleted { + logrus.Infof("Cluster %s deleted!", cluster.Name) + return true, nil + } + return false, nil + }) + require.NoError(t, err) + + err = nodestat.AllNodeDeleted(client, clusterID) + require.NoError(t, err) +} + // VerifyDeleteRKE2K3SCluster validates that a non-rke1 cluster and its resources are deleted. func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID string) { cluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(clusterID) @@ -271,6 +481,49 @@ func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID require.NoError(t, err) } +// VerifyDeleteRKE2K3SClusterWithTimeout validates that a non-rke1 cluster and its resources are deleted. 
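+// As with the other *WithTimeout helpers, a nil timeout falls back to defaults.WatchTimeoutSeconds.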
+func VerifyDeleteRKE2K3SClusterWithTimeout(t *testing.T, client *rancher.Client, clusterID string, timeout *int64) { + cluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(clusterID) + require.NoError(t, err) + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + require.NoError(t, err) + + provKubeClient, err := adminClient.GetKubeAPIProvisioningClient() + require.NoError(t, err) + + var timeoutSeconds *int64 + if timeout != nil { + timeoutSeconds = timeout + } else { + timeoutSeconds = &defaults.WatchTimeoutSeconds + } + + watchInterface, err := provKubeClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + cluster.Name, + TimeoutSeconds: timeoutSeconds, + }) + require.NoError(t, err) + + err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { + cluster := event.Object.(*provv1.Cluster) + if event.Type == watch.Error { + return false, fmt.Errorf("error: unable to delete cluster %s", cluster.ObjectMeta.Name) + } else if event.Type == watch.Deleted { + logrus.Infof("Cluster %s deleted!", cluster.ObjectMeta.Name) + return true, nil + } else if cluster == nil { + logrus.Infof("Cluster %s deleted!", cluster.ObjectMeta.Name) + return true, nil + } + return false, nil + }) + require.NoError(t, err) + + err = nodestat.AllNodeDeleted(client, clusterID) + require.NoError(t, err) +} + // CertRotationCompleteCheckFunc returns a watch check function that checks if the certificate rotation is complete func CertRotationCompleteCheckFunc(generation int64) wait.WatchCheckFunc { return func(event watch.Event) (bool, error) { @@ -463,6 +716,26 @@ func VerifySnapshots(client *rancher.Client, clusterName string, expectedSnapsho return snapshotToBeRestored, err } +// getSnapshots is a helper function to get the snapshots for a cluster +func GetSnapshots(client *rancher.Client, localclusterID string, clusterName string) ([]string, error) { + steveclient, err := client.Steve.ProxyDownstream(localclusterID) + if err != nil { + return nil, err + } + snapshotSteveObjList, err := steveclient.SteveType("rke.cattle.io.etcdsnapshot").List(nil) + if err != nil { + return nil, err + } + snapshots := []string{} + for _, snapshot := range snapshotSteveObjList.Data { + if strings.Contains(snapshot.ObjectMeta.Name, clusterName) { + snapshots = append(snapshots, snapshot.Name) + } + } + return snapshots, nil + +} + // VerifySSHTests validates the ssh tests listed in the config on each node of the cluster func VerifySSHTests(t *testing.T, client *rancher.Client, clusterObject *steveV1.SteveAPIObject, sshTests []provisioninginput.SSHTestCase, clusterID string) { client, err := client.ReLogin()